Introduce new code format workflow and update existing files (#8)
* update and fix linting issues

Signed-off-by: kta-intel <[email protected]>

* fix requirements filename

Signed-off-by: kta-intel <[email protected]>

* remove unused import

Signed-off-by: kta-intel <[email protected]>

* remove unused import

Signed-off-by: kta-intel <[email protected]>

* isort, black, flake8 formatting

Signed-off-by: kta-intel <[email protected]>

---------

Signed-off-by: kta-intel <[email protected]>
kta-intel authored Oct 30, 2024
1 parent 527fc86 commit c762e88
Showing 21 changed files with 113 additions and 46 deletions.
12 changes: 5 additions & 7 deletions .github/workflows/lint.yml
@@ -1,7 +1,7 @@
# This workflow will install Python dependencies, run tests and lint with a single version of Python
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions

-name: Lint with Flake8
+name: Check code format

on:
  pull_request:
@@ -21,11 +21,9 @@ jobs:
      uses: actions/setup-python@v3
      with:
        python-version: "3.8"
-    - name: Install dependencies
+    - name: Install linters
      run: |
        python -m pip install --upgrade pip
-        pip install -r requirements-linters.txt
-        pip install .
-    - name: Lint with flake8
-      run: |
-        flake8 --show-source
+        pip install -r requirements-linters.txt
+    - name: Lint using built-in script
+      run: bash shell/lint.sh
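The job now installs only the linters and defers to the repository's own lint script, so CI and local runs share one code path. A rough sketch of reproducing the check locally, assuming a Python 3.8+ environment at the repository root:

python -m pip install --upgrade pip
pip install -r requirements-linters.txt
bash shell/lint.sh    # runs the pre-commit hooks, then isort/black/flake8 in check-only mode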
31 changes: 31 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,31 @@
---
minimum_pre_commit_version: 3.3.3
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: end-of-file-fixer
        files: \.py$
      - id: requirements-txt-fixer
      - id: check-merge-conflict
      - id: check-case-conflict
      - id: check-json
        files: \.json$
      - id: check-yaml
        files: \.yaml$
      - id: debug-statements
      - id: pretty-format-json
        args:
          - --autofix
        files: \.json$
      - id: trailing-whitespace
        files: \.py$
  - repo: https://github.com/PyCQA/autoflake
    rev: v2.3.1
    hooks:
      - id: autoflake
        args:
          - --in-place
          - --remove-unused-variables
          - --recursive
          - --ignore-pass-statements
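The hooks cover mechanical fixes (end-of-file newlines, trailing whitespace on Python files, sorted requirements files, pretty-printed JSON) plus autoflake for unused code. CI exercises them through shell/lint.sh, but they are most useful wired into local commits. A minimal setup sketch, assuming pre-commit is already installed from requirements-linters.txt:

pre-commit install            # registers the hooks in .git/hooks/pre-commit
pre-commit run --all-files    # one-off pass over the whole tree, as CI does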
2 changes: 1 addition & 1 deletion openfl_contrib/__version__.py
@@ -1,4 +1,4 @@
# Copyright (C) 2024 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""openfl-contrib version information."""
-__version__ = '0.1.0'
+__version__ = "0.1.0"
4 changes: 3 additions & 1 deletion openfl_contrib/interface/aggregation_functions/__init__.py
@@ -3,4 +3,6 @@

"""Aggregation functions package."""

-from openfl_contrib.interface.aggregation_functions.custom_weighted_average import CustomWeightedAverage
+from openfl_contrib.interface.aggregation_functions.custom_weighted_average import (
+    CustomWeightedAverage,
+)
openfl_contrib/interface/aggregation_functions/custom_weighted_average.py
@@ -4,7 +4,6 @@
"""Custom Federated averaging module."""

import numpy as np
-
from openfl.interface.aggregation_functions.core import AggregationFunction


@@ -42,4 +41,4 @@ def call(self, local_tensors, *_) -> np.ndarray:

        total_weight = sum(weights)
        weighted_sum = np.sum([tensor * weight for tensor, weight in zip(tensors, weights)], axis=0)
-        return weighted_sum / total_weight
\ No newline at end of file
+        return weighted_sum / total_weight
17 changes: 7 additions & 10 deletions openfl_contrib/pipelines/skc_pipeline.py
@@ -7,11 +7,9 @@
import gzip as gz

import numpy as np
+from openfl.pipelines.pipeline import TransformationPipeline, Transformer
from sklearn import cluster
-
-from openfl.pipelines.pipeline import TransformationPipeline
-from openfl.pipelines.pipeline import Transformer


class SparsityTransformer(Transformer):
"""A transformer class to sparsify input data."""
@@ -36,7 +34,7 @@ def forward(self, data, **kwargs):
            sparse_data: a flattened, sparse representation of the input tensor
            metadata: dictionary to store a list of meta information.
        """
-        metadata = {'int_list': list(data.shape)}
+        metadata = {"int_list": list(data.shape)}
        # sparsification
        data = data.astype(np.float32)
        flatten_data = data.flatten()
@@ -59,7 +57,7 @@ def backward(self, data, metadata, **kwargs):
            recovered_data: an numpy array with original shape.
        """
        data = data.astype(np.float32)
-        data_shape = metadata['int_list']
+        data_shape = metadata["int_list"]
        recovered_data = data.reshape(data_shape)
        return recovered_data

@@ -109,16 +107,15 @@ def forward(self, data, **kwargs):
        # clustering
        data = data.reshape((-1, 1))
        if data.shape[0] >= self.n_cluster:
-            k_means = cluster.KMeans(
-                n_clusters=self.n_cluster, n_init=self.n_cluster)
+            k_means = cluster.KMeans(n_clusters=self.n_cluster, n_init=self.n_cluster)
            k_means.fit(data)
            quantized_values = k_means.cluster_centers_.squeeze()
            indices = k_means.labels_
            quant_array = np.choose(indices, quantized_values)
        else:
            quant_array = data
        int_array, int2float_map = self._float_to_int(quant_array)
-        metadata = {'int_to_float': int2float_map}
+        metadata = {"int_to_float": int2float_map}
        int_array = int_array.reshape(-1)
        return int_array, metadata

@@ -135,7 +132,7 @@ def backward(self, data, metadata, **kwargs):
"""
# convert back to float
data = co.deepcopy(data)
int2float_map = metadata['int_to_float']
int2float_map = metadata["int_to_float"]
for key in int2float_map:
indices = data == key
data[indices] = int2float_map[key]
@@ -221,6 +218,6 @@ def __init__(self, p_sparsity=0.1, n_clusters=6, **kwargs):
        transformers = [
            SparsityTransformer(self.p),
            KmeansTransformer(self.n_cluster),
-            GZIPTransformer()
+            GZIPTransformer(),
        ]
        super(SKCPipeline, self).__init__(transformers=transformers, **kwargs)
2 changes: 1 addition & 1 deletion openfl_contrib_tutorials/ml_to_fl/central/requirements.txt
@@ -1,2 +1,2 @@
torch==2.3.1
-torchvision==0.18.1
\ No newline at end of file
+torchvision==0.18.1
@@ -1,2 +1,2 @@
# Copyright (C) 2024 Intel Corporation
-# SPDX-License-Identifier: Apache-2.0
\ No newline at end of file
+# SPDX-License-Identifier: Apache-2.0
@@ -70,7 +70,7 @@ def load_dataset(data_path, train_split_ratio=0.8, **kwargs):
    # Implement dataset loading logic here and return the appropriate data.
    # Replace the following placeholders with actual data loading code.
    dataset = MNISTDataset(
-        root=data_path, 
+        root=data_path,
        transform=Compose([Grayscale(num_output_channels=1), ToTensor()])
    )
    n_train = int(train_split_ratio * len(dataset))
@@ -1,4 +1,4 @@
+tensorboard
torch==2.3.0
torchvision==0.18
-tensorboard
wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability
@@ -1,4 +1,4 @@
+tensorboard
torch==2.3.0
torchvision==0.18
-tensorboard
wheel>=0.38.0 # not directly required, pinned by Snyk to avoid a vulnerability
7 changes: 7 additions & 0 deletions pyproject.toml
@@ -0,0 +1,7 @@
[tool.black]
line-length = 100

[tool.isort]
profile = "black"
force_single_line = "False"
line_length = 100
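Both formatters share the 100-character line length, and the black profile keeps isort's import style compatible with black so the two tools do not fight over the same lines. The shell scripts added below pass this file explicitly; a sketch of the equivalent direct invocations, assuming they run from the repository root:

isort --sp pyproject.toml openfl_contrib      # --sp points isort at the [tool.isort] section
black --config pyproject.toml openfl_contrib  # reads [tool.black]; rewrites to line length 100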
16 changes: 3 additions & 13 deletions requirements-linters.txt
@@ -1,14 +1,4 @@
+black
flake8
-flake8-broken-line
-flake8-bugbear
-flake8-builtins
-flake8-comprehensions
-flake8-copyright
-flake8-docstrings
-flake8-eradicate
-flake8-import-order
-flake8-import-single
-flake8-quotes
-flake8-use-fstring
-pep8-naming
-setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
+isort
+pre-commit
2 changes: 1 addition & 1 deletion requirements-test.txt
@@ -1 +1 @@
-pytest==8.2.0
\ No newline at end of file
+pytest==8.2.0
19 changes: 19 additions & 0 deletions setup.cfg
@@ -0,0 +1,19 @@
[flake8]
ignore =
    # Conflicts with black
    E203
    # Line break occurred before a binary operator. Update by W504 Line
    W503
    # Allow "import torch.nn.functional as F"
    N812

per-file-ignores =
    # Unused imports in __init__.py are OK
    **/__init__.py:F401

exclude =
    *_pb2*,

max-line-length = 100

copyright-check = True
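E203 and W503 are the two pycodestyle rules that conflict with black's output, so ignoring them is the standard pairing. flake8 picks up a [flake8] section in setup.cfg automatically when run from the project root; shell/lint.sh passes --config anyway to make the choice explicit. A sketch of the resulting check:

flake8 --config setup.cfg --show-source openfl_contrib    # --show-source prints each offending line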
2 changes: 1 addition & 1 deletion setup.py
@@ -14,4 +14,4 @@
        'openfl @ git+https://github.com/securefederatedai/openfl.git@develop',
    ],
    python_requires='>=3.8, <3.12'
-)
\ No newline at end of file
+)
13 changes: 13 additions & 0 deletions shell/format.sh
@@ -0,0 +1,13 @@
#!/bin/bash
set -Eeuo pipefail

base_dir=$(dirname $(dirname $0))

# Run the pre-commit checks
pre-commit run --all-files

isort --sp "${base_dir}/pyproject.toml" openfl_contrib

black --config "${base_dir}/pyproject.toml" openfl_contrib

flake8 --config "${base_dir}/setup.cfg" openfl_contrib
13 changes: 13 additions & 0 deletions shell/lint.sh
@@ -0,0 +1,13 @@
#!/bin/bash
set -Eeuo pipefail

base_dir=$(dirname $(dirname $0))

# Run the pre-commit checks
pre-commit run --all-files

isort --sp "${base_dir}/pyproject.toml" --check openfl_contrib

black --config "${base_dir}/pyproject.toml" --check openfl_contrib

flake8 --config "${base_dir}/setup.cfg" --show-source openfl_contrib
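format.sh and lint.sh drive the same three tools with the same configuration; the difference is that format.sh rewrites files in place, while lint.sh runs isort and black with --check so it only reports, exiting non-zero on any violation, which is what fails the CI job. The intended local loop, sketched:

bash shell/format.sh    # auto-fix: pre-commit fixers, isort, black (flake8 still just reports)
bash shell/lint.sh      # verify-only pass with the same exit behavior CI sees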
1 change: 0 additions & 1 deletion tests/github/test_task_runner.py
@@ -3,7 +3,6 @@

import os
import time
-import socket
import argparse
from pathlib import Path
from subprocess import check_call
3 changes: 1 addition & 2 deletions tests/github/utils.py
@@ -5,7 +5,6 @@
import os
from pathlib import Path
import re
-import tarfile


def create_collaborator(col, workspace_root, data_path, archive_name, fed_workspace):
@@ -80,4 +79,4 @@ def certify_aggregator(fqdn):
    check_call(['fx', 'aggregator', 'generate-cert-request', '--fqdn', fqdn])

    # Sign aggregator certificate
-    check_call(['fx', 'aggregator', 'certify', '--fqdn', fqdn, '--silent'])
\ No newline at end of file
+    check_call(['fx', 'aggregator', 'certify', '--fqdn', fqdn, '--silent'])
@@ -30,7 +30,7 @@ def test_get_aggregated_tensor_weights(tensor_db):
"""Test that get_aggregated_tensor calculates correctly."""

    collaborator_weight_dict = {
-        'col1': 0.9, 
+        'col1': 0.9,
        'col2': 0.1
    }

@@ -45,4 +45,4 @@
        axis=0
    )

-    assert np.array_equal(agg_nparray, control_nparray)
\ No newline at end of file
+    assert np.array_equal(agg_nparray, control_nparray)
