Skip to content

Commit

Permalink
make pre-commit happy
Browse files Browse the repository at this point in the history
  • Loading branch information
MridulS committed Sep 25, 2024
1 parent a4a8e4b commit 2c28523
Show file tree
Hide file tree
Showing 3 changed files with 4 additions and 2 deletions.
4 changes: 2 additions & 2 deletions _nx_parallel/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ def get_info():
},
},
"approximate_all_pairs_node_connectivity": {
"url": "https:/networkx/nx-parallel/blob/main/nx_parallel/algorithms/approximation/connectivity.py#L13",
"url": "https:/networkx/nx-parallel/blob/main/nx_parallel/algorithms/approximation/connectivity.py#L14",
"additional_docs": "The parallel implementation first divides the a list of all permutation (in case of directed graphs) and combinations (in case of undirected graphs) of `nbunch` into chunks and then creates a generator to lazily compute the local node connectivities for each chunk, and then employs joblib's `Parallel` function to execute these computations in parallel across `n_jobs` number of CPU cores. At the end, the results are aggregated into a single dictionary and returned.",
"additional_parameters": {
'get_chunks : str, function (default = "chunks")': "A function that takes in `list(iter_func(nbunch, 2))` as input and returns an iterable `pairs_chunks`, here `iter_func` is `permutations` in case of directed graphs and `combinations` in case of undirected graphs. The default is to create chunks by slicing the list into `n_jobs` chunks, such that the size of each chunk is at most 10 and at least 1."
Expand Down Expand Up @@ -119,7 +119,7 @@ def get_info():
},
},
"local_efficiency": {
"url": "https:/networkx/nx-parallel/blob/main/nx_parallel/algorithms/efficiency_measures.py#L10",
"url": "https:/networkx/nx-parallel/blob/main/nx_parallel/algorithms/efficiency_measures.py#L11",
"additional_docs": "The parallel computation is implemented by dividing the nodes into chunks and then computing and adding global efficiencies of all node in all chunks, in parallel, and then adding all these sums and dividing by the total number of nodes at the end.",
"additional_parameters": {
'get_chunks : str, function (default = "chunks")': "A function that takes in a list of all the nodes as input and returns an iterable `node_chunks`. The default chunking is done by slicing the `nodes` into `n_jobs` number of chunks."
Expand Down
1 change: 1 addition & 0 deletions nx_parallel/algorithms/approximation/connectivity.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""Parallel implementations of fast approximation for node connectivity"""

import itertools
from joblib import Parallel, delayed
from networkx.algorithms.approximation.connectivity import local_node_connectivity
Expand Down
1 change: 1 addition & 0 deletions nx_parallel/algorithms/efficiency_measures.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"""Provides functions for computing the efficiency of nodes and graphs."""

import networkx as nx
from joblib import Parallel, delayed
import nx_parallel as nxp
Expand Down

0 comments on commit 2c28523

Please sign in to comment.