This repository has been archived by the owner on Oct 13, 2021. It is now read-only.

try to enable flake8 checker. #503

Merged: 1 commit, May 29, 2020
4 changes: 4 additions & 0 deletions .flake8
@@ -0,0 +1,4 @@
[flake8]
max-line-length = 120
per-file-ignores =
    __init__.py:F401
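This configuration raises flake8's default 79-character line limit to 120 and waives only F401 ("imported but unused") in __init__.py files, where imports usually exist to re-export names for the package's public API. A minimal sketch of the pattern that rule is meant to allow; the module and function names are hypothetical, not taken from this repository:

# some_package/__init__.py
from .converter import convert_model  # re-export only; F401 here is waived by the per-file rule above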
2 changes: 1 addition & 1 deletion keras2onnx/_graph_cvt.py
@@ -13,7 +13,7 @@
# limitations under the License.
# ==============================================================================
"""Helpers to convert variables to constants in TensorFlow 2.0."""

# flake8: noqa
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
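The added file-level # flake8: noqa comment makes flake8 skip this file entirely, a common choice for vendored code such as this copy of TensorFlow's variable-to-constant helpers. Per-line suppression uses a trailing # noqa instead, optionally restricted to specific codes. A brief sketch of the two forms, with hypothetical file contents:

# vendored_module.py: the comment below silences every check in this file
# flake8: noqa

# elsewhere, a single line can be waived for a single code
from legacy_api import old_helper  # noqa: F401  (kept only for backward compatibility)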
6 changes: 3 additions & 3 deletions keras2onnx/_parser_1x.py
@@ -100,7 +100,9 @@ def on_parsing_keras_layer(graph, node_list, layer, kenode, model, varset, prefi
for n_, o_ in enumerate(outputs):
oname = prefix + o_.name
k2o_logger().debug('\toutput: ' + oname)
o1 = varset.get_local_variable_or_declare_one(oname, infer_variable_type(o_, varset.target_opset, kenode_output_shapes[n_]))
o1 = varset.get_local_variable_or_declare_one(oname,
infer_variable_type(o_, varset.target_opset,
kenode_output_shapes[n_]))
operator.add_output(o1)

if hasattr(layer, 'output_mask') and layer.output_mask is not None:
@@ -122,8 +124,6 @@ def on_parsing_keras_layer(graph, node_list, layer, kenode, model, varset, prefi


def build_opdict_from_keras(model):
# type: (keras.Model) -> {}

output_dict = {}
for l_ in model.layers:
if hasattr(l_, 'layers'):
2 changes: 1 addition & 1 deletion keras2onnx/common/data_types.py
@@ -4,4 +4,4 @@
# license information.
###############################################################################

from onnxconverter_common.data_types import *
from onnxconverter_common.data_types import * # noqa
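The bare # noqa keeps this wildcard re-export while silencing everything flake8 would otherwise report for the line, chiefly F401 (unused import) and F403 (star import). A narrower alternative, shown with a hypothetical module, lists only the codes being waived:

from some_vendor.types import *  # noqa: F401,F403  -- wildcard re-export; only these two checks are waived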
1 change: 1 addition & 0 deletions keras2onnx/common/onnx_ops.py
@@ -4,6 +4,7 @@
# license information.
###############################################################################
import functools
import numpy as np
import onnxconverter_common
from onnx.mapping import NP_TYPE_TO_TENSOR_TYPE
from onnxconverter_common.onnx_ops import * # noqa:
2 changes: 1 addition & 1 deletion keras2onnx/parser.py
@@ -150,7 +150,7 @@ def _check_layer_converter_availability(sub_model):
else:
layer_type = type(l_)
exist = get_converter(layer_type) or \
layer_type in [keras.layers.InputLayer, keras.layers.wrappers.TimeDistributed]
layer_type in [keras.layers.InputLayer, keras.layers.wrappers.TimeDistributed]

if not exist:
k2o_logger().info("The layer {} doesn't have a specific converter, fall back.".format(str(l_)))
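The two layer_type in [...] lines above appear identical because this view does not preserve leading whitespace; the commit re-indents the backslash continuation so flake8's continuation-line indentation checks (the E12x family) pass. A generic sketch of the intended shape, with hypothetical names:

# continuation line indented one level past the statement it continues
exist = get_converter(layer_type) or \
    layer_type in [InputLayer, TimeDistributed]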
2 changes: 0 additions & 2 deletions keras2onnx/proto/tfcompat.py
@@ -36,14 +36,12 @@ def dump_graph_into_tensorboard(tf_graph):
if is_tf2:
tensorflow = _tf.compat.v1


def is_subclassed(layer):
"""Returns True if the object is a subclassed layer or subclassed model."""
return (layer.__module__.find('keras.engine') == -1 and
layer.__module__.find('keras.layers') == -1)
else:
tensorflow = _tf


def is_subclassed(layer):
return False
9 changes: 6 additions & 3 deletions keras2onnx/topology.py
@@ -137,7 +137,8 @@ def _check_structure(self):
# A operator has an output, so we remove the operator from the unused-operator list.
unused_operators.discard(operator.full_name)
for variable in operator.input_masks + operator.output_masks:
if variable is None: continue
if variable is None:
continue
# A variable is used by an operator, so we remove the variable from the unused-variable list.
unused_variables.discard(variable.full_name)
# A operator has an output, so we remove the operator from the unused-operator list.
@@ -236,7 +237,8 @@ def _remove_unused_nodes(nodes, inputs, outputs):
def _build_extra_inputs(container):
# When calling ModelComponentContainer's add_initializer(...), nothing is added into the input list.
# However, In ONNX, for target opset < 9, initializers should also be model's (GraphProto) inputs.
# Thus, we create ValueInfoProto objects from initializers (type: TensorProto) directly and then add them into model's input list.
# Thus, we create ValueInfoProto objects from initializers (type: TensorProto) ...
# ... directly and then add them into model's input list.
extra_inputs = [] # ValueInfoProto list of the initializers
for tensor in container.initializers:
# Sometimes (especially when creating optional input values such as RNN's initial hidden state), an initializer
@@ -351,7 +353,8 @@ def convert_topology(topology, model_name, doc_string, target_opset, channel_fir
'{} nchw_inputs does not make effect. Please set nchw_inputs to empty.'.format(onnx_not_imported))
k2o_logger().warning('{} so the convertor optimizer is not enabled.'.format(onnx_not_imported))
except Exception as e: # noqa
# either optimizer issue or converter issue, we just let it go to diagnose the issue from the converted model.
# either optimizer issue or converter issue, we just let it go...
# ... so that we can diagnose the issue from the converted model.
k2o_logger().warning(
'There is an error({}) happened during optimizing on the converted model!'.format(type(e)))
k2o_logger().warning(str(e))
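Not part of this diff, but for orientation: with the .flake8 file above at the repository root, the checker picks the settings up automatically, whether run from the command line or through flake8's documented legacy Python API. A small sketch, assuming the keras2onnx package directory is the target:

from flake8.api import legacy as flake8_api

style_guide = flake8_api.get_style_guide()          # reads .flake8 / setup.cfg / tox.ini from the working directory
report = style_guide.check_files(['keras2onnx'])    # assumed path to the package
print(report.total_errors)                          # 0 when the tree is clean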