Increase minimum Python version to 3.8 (#2306)
danieljanes authored Sep 7, 2023
1 parent 6bd2349 commit 3db7849
Showing 10 changed files with 17 additions and 20 deletions.
2 changes: 1 addition & 1 deletion doc/source/ref-changelog.md
@@ -12,7 +12,7 @@

 ### Incompatible changes

-- **Remove support for Python 3.7** ([#2280](https://github.com/adap/flower/pull/2280), [#2299](https://github.com/adap/flower/pull/2299), [2304](https://github.com/adap/flower/pull/2304))
+- **Remove support for Python 3.7** ([#2280](https://github.com/adap/flower/pull/2280), [#2299](https://github.com/adap/flower/pull/2299), [2304](https://github.com/adap/flower/pull/2304), [#2306](https://github.com/adap/flower/pull/2306))

   Python 3.7 support was deprecated in Flower 1.5, and this release removes support. Flower now requires Python 3.8.

7 changes: 3 additions & 4 deletions pyproject.toml
@@ -30,7 +30,6 @@ classifiers = [
     "Programming Language :: Python",
     "Programming Language :: Python :: 3",
     "Programming Language :: Python :: 3 :: Only",
-    "Programming Language :: Python :: 3.7",
     "Programming Language :: Python :: 3.8",
     "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
@@ -58,7 +57,7 @@ flower-server = "flwr.server:run_server"
 flower-client = "flwr.client:run_client"

 [tool.poetry.dependencies]
-python = "^3.7"
+python = "^3.8"
 # Mandatory dependencies
 numpy = "^1.21.0"
 grpcio = "^1.48.2,!=1.52.0"
@@ -132,7 +131,7 @@ known_first_party = ["flwr", "flwr_experimental", "flwr_tool"]

 [tool.black]
 line-length = 88
-target-version = ["py37", "py38", "py39", "py310", "py311"]
+target-version = ["py38", "py39", "py310", "py311"]

 [tool.pylint."MESSAGES CONTROL"]
 disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias"
@@ -178,7 +177,7 @@ wrap-summaries = 88
 wrap-descriptions = 88

 [tool.ruff]
-target-version = "py37"
+target-version = "py38"
 line-length = 88
 select = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"]
 fixable = ["D", "E", "F", "W", "B", "ISC", "C4", "UP"]

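The `python = "^3.8"` constraint is enforced by pip/Poetry at install time. As a rough sketch only (not part of this commit, and the error message wording is made up), a runtime guard for the same floor could look like:

```python
# Sketch only, not part of this commit: fail fast when the interpreter is
# older than the new minimum declared in pyproject.toml.
import sys

if sys.version_info < (3, 8):
    raise RuntimeError(
        "This version of Flower requires Python 3.8 or newer, "
        f"but is running on {sys.version.split()[0]}."
    )
```
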
2 changes: 1 addition & 1 deletion src/py/flwr/common/dp.py
@@ -26,7 +26,7 @@
 def _get_update_norm(update: NDArrays) -> float:
     flattened_update = update[0]
     for i in range(1, len(update)):
-        flattened_update = np.append(flattened_update, update[i])  # type: ignore
+        flattened_update = np.append(flattened_update, update[i])
     return float(np.sqrt(np.sum(np.square(flattened_update))))

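For context, `_get_update_norm` flattens every layer update into one vector and returns its L2 norm. A standalone sketch of the same computation, with made-up toy arrays:

```python
# Illustration only (not from the repository): compute the L2 norm of all
# layer updates flattened into a single vector.
import numpy as np

update = [np.array([[3.0, 0.0], [0.0, 4.0]]), np.array([12.0])]
flat = np.concatenate([layer.ravel() for layer in update])
print(float(np.linalg.norm(flat)))  # 13.0, i.e. sqrt(3^2 + 4^2 + 12^2)
```
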
4 changes: 2 additions & 2 deletions src/py/flwr/common/parameter.py
@@ -40,7 +40,7 @@ def ndarray_to_bytes(ndarray: NDArray) -> bytes:
     # WARNING: NEVER set allow_pickle to true.
     # Reason: loading pickled data can execute arbitrary code
     # Source: https://numpy.org/doc/stable/reference/generated/numpy.save.html
-    np.save(bytes_io, ndarray, allow_pickle=False)  # type: ignore
+    np.save(bytes_io, ndarray, allow_pickle=False)
     return bytes_io.getvalue()

@@ -50,5 +50,5 @@ def bytes_to_ndarray(tensor: bytes) -> NDArray:
     # WARNING: NEVER set allow_pickle to true.
     # Reason: loading pickled data can execute arbitrary code
     # Source: https://numpy.org/doc/stable/reference/generated/numpy.load.html
-    ndarray_deserialized = np.load(bytes_io, allow_pickle=False)  # type: ignore
+    ndarray_deserialized = np.load(bytes_io, allow_pickle=False)
     return cast(NDArray, ndarray_deserialized)
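Both helpers wrap NumPy's `.npy` serialization with pickling disabled. A self-contained round trip of that pattern (illustration only, not part of the diff):

```python
# Round-trip sketch of the np.save / np.load pattern these helpers rely on,
# with pickling disabled for safety.
from io import BytesIO

import numpy as np

arr = np.arange(6, dtype=np.float32).reshape(2, 3)

buf = BytesIO()
np.save(buf, arr, allow_pickle=False)  # serialize to the .npy format
data = buf.getvalue()                  # bytes suitable for sending over the wire

restored = np.load(BytesIO(data), allow_pickle=False)
assert np.array_equal(arr, restored)
```
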
4 changes: 2 additions & 2 deletions src/py/flwr/common/parameter_test.py
@@ -32,8 +32,8 @@ def test_serialisation_deserialisation() -> None:
     arr_deserialized = bytes_to_ndarray(arr_serialized)

     # Assert deserialized array is equal to original
-    np.testing.assert_equal(arr_deserialized, arr)  # type: ignore
+    np.testing.assert_equal(arr_deserialized, arr)

     # Test false positive
     with pytest.raises(AssertionError, match="Arrays are not equal"):
-        np.testing.assert_equal(arr_deserialized, np.ones((3, 2)))  # type: ignore
+        np.testing.assert_equal(arr_deserialized, np.ones((3, 2)))
8 changes: 3 additions & 5 deletions src/py/flwr/server/strategy/aggregate.py
@@ -48,7 +48,7 @@ def aggregate_median(results: List[Tuple[NDArrays, int]]) -> NDArrays:

     # Compute median weight of each layer
     median_w: NDArrays = [
-        np.median(np.asarray(layer), axis=0) for layer in zip(*weights)  # type: ignore
+        np.median(np.asarray(layer), axis=0) for layer in zip(*weights)
     ]
     return median_w

@@ -122,14 +122,12 @@ def _compute_distances(weights: List[NDArrays]) -> NDArray:
     Input: weights - list of weights vectors
     Output: distances - matrix distance_matrix of squared distances between the vectors
     """
-    flat_w = np.array(
-        [np.concatenate(p, axis=None).ravel() for p in weights]  # type: ignore
-    )
+    flat_w = np.array([np.concatenate(p, axis=None).ravel() for p in weights])
     distance_matrix = np.zeros((len(weights), len(weights)))
     for i, _ in enumerate(flat_w):
         for j, _ in enumerate(flat_w):
             delta = flat_w[i] - flat_w[j]
-            norm = np.linalg.norm(delta)  # type: ignore
+            norm = np.linalg.norm(delta)
             distance_matrix[i, j] = norm**2
     return distance_matrix

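As a reference point, the list comprehension touched above takes a coordinate-wise median of each layer across clients. A toy sketch with made-up client values (it skips the `(weights, num_examples)` unpacking the real `aggregate_median` does first):

```python
# Toy sketch (made-up values): coordinate-wise median per layer across clients.
import numpy as np

weights = [
    [np.array([1.0, 2.0]), np.array([[1.0]])],  # client 1: two layers
    [np.array([3.0, 0.0]), np.array([[5.0]])],  # client 2
    [np.array([2.0, 4.0]), np.array([[3.0]])],  # client 3
]

median_w = [np.median(np.asarray(layer), axis=0) for layer in zip(*weights)]
print(median_w)  # [array([2., 2.]), array([[3.]])]
```
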
2 changes: 1 addition & 1 deletion src/py/flwr/server/strategy/aggregate_test.py
@@ -37,7 +37,7 @@ def test_aggregate() -> None:
     actual = aggregate(results)

     # Assert
-    np.testing.assert_equal(expected, actual)  # type: ignore
+    np.testing.assert_equal(expected, actual)


 def test_weighted_loss_avg_single_value() -> None:

2 changes: 1 addition & 1 deletion src/py/flwr/server/strategy/fedavg_android.py
@@ -242,5 +242,5 @@ def ndarray_to_bytes(self, ndarray: NDArray) -> bytes:
     # pylint: disable=R0201
     def bytes_to_ndarray(self, tensor: bytes) -> NDArray:
         """Deserialize NumPy array from bytes."""
-        ndarray_deserialized = np.frombuffer(tensor, dtype=np.float32)  # type: ignore
+        ndarray_deserialized = np.frombuffer(tensor, dtype=np.float32)
         return cast(NDArray, ndarray_deserialized)
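The Android strategy exchanges raw float32 buffers rather than `.npy` payloads, so deserialization is a single `np.frombuffer` call. A minimal round trip (illustration only, not part of the diff):

```python
# Minimal round trip: raw float32 bytes in, flat float32 array out. Note that
# the buffer carries no shape information, only the flat values.
import numpy as np

weights = np.array([0.1, 0.2, 0.3], dtype=np.float32)
payload = weights.tobytes()
restored = np.frombuffer(payload, dtype=np.float32)
assert np.array_equal(weights, restored)
```
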
4 changes: 2 additions & 2 deletions src/py/flwr/server/strategy/fedavgm_test.py
@@ -78,7 +78,7 @@ def test_aggregate_fit_using_near_one_server_lr_and_no_momentum() -> None:
     # Assert
     assert actual
     for w_act, w_exp in zip(parameters_to_ndarrays(actual), expected):
-        assert_almost_equal(w_act, w_exp)  # type: ignore
+        assert_almost_equal(w_act, w_exp)


 def test_aggregate_fit_server_learning_rate_and_momentum() -> None:
@@ -136,4 +136,4 @@ def test_aggregate_fit_server_learning_rate_and_momentum() -> None:
     # Assert
     assert actual
     for w_act, w_exp in zip(parameters_to_ndarrays(actual), expected):
-        assert_almost_equal(w_act, w_exp)  # type: ignore
+        assert_almost_equal(w_act, w_exp)
2 changes: 1 addition & 1 deletion src/py/flwr/server/strategy/qfedavg.py
@@ -174,7 +174,7 @@ def norm_grad(grad_list: NDArrays) -> float:
     # output: square of the L-2 norm
     client_grads = grad_list[0]
     for i in range(1, len(grad_list)):
-        client_grads = np.append(  # type: ignore
+        client_grads = np.append(
             client_grads, grad_list[i]
         )  # output a flattened array
     squared = np.square(client_grads)

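For reference, `norm_grad` returns the squared L2 norm of the flattened gradients. A toy check with made-up numbers:

```python
# Toy check (made-up numbers): the squared L2 norm is the sum of squared
# entries across all layers once they are flattened.
import numpy as np

grad_list = [np.array([3.0, 4.0]), np.array([[0.0, 12.0]])]
flat = np.concatenate([g.ravel() for g in grad_list])
print(float(np.sum(np.square(flat))))  # 169.0 = 3^2 + 4^2 + 12^2
```
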
