Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

update to latest mypy and exclude Python 3.6 #776

Merged
merged 3 commits
Oct 7, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion azure-pipelines.yml
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ jobs:
- script: |
python -m mypy thinc
displayName: 'Run mypy'
condition: ne(variables['python.version'], '3.10')
condition: ne(variables['python.version'], '3.6')

- task: DeleteFiles@1
inputs:
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ pytest-cov>=2.7.0,<2.8.0
coverage>=5.0.0,<6.0.0
mock>=2.0.0,<3.0.0
flake8>=3.5.0,<3.6.0
mypy>=0.901,<0.970; platform_machine!='aarch64'
mypy>=0.980,<0.990; platform_machine != "aarch64" and python_version >= "3.7"
types-mock>=0.1.1
types-contextvars>=0.1.2; python_version < "3.7"
types-dataclasses>=0.1.3; python_version < "3.7"
Expand Down
26 changes: 13 additions & 13 deletions thinc/backends/ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -373,7 +373,7 @@ def pad( # noqa: F811
# array sizes.
length = (length + (round_to - 1)) // round_to * round_to
final_shape = (len(seqs), length) + seqs[0].shape[1:]
output: Array3d = self.alloc(final_shape, dtype=seqs[0].dtype)
output: Array3d = cast(Array3d, self.alloc(final_shape, dtype=seqs[0].dtype))
for i, arr in enumerate(seqs):
# It's difficult to convince this that the dtypes will match.
output[i, : arr.shape[0]] = arr # type: ignore[assignment, call-overload]
Expand Down Expand Up @@ -451,7 +451,7 @@ def get_dropout_mask(self, shape: Shape, drop: Optional[float]) -> FloatsXd:
if drop is None or drop <= 0:
return self.xp.ones(shape, dtype="f")
elif drop >= 1.0:
return self.alloc(shape)
return self.alloc_f(shape)
coinflips = self.xp.random.uniform(0.0, 1.0, shape)
mask = (coinflips >= drop) / (1.0 - drop)
return cast(FloatsXd, self.asarray(mask, dtype="float32"))
Expand All @@ -463,7 +463,7 @@ def alloc1f(
dtype: Optional[DTypesFloat] = "float32",
zeros: bool = True,
) -> Floats1d:
return self.alloc((d0,), dtype=dtype, zeros=zeros)
return cast(Floats1d, self.alloc((d0,), dtype=dtype, zeros=zeros))

def alloc2f(
self,
Expand All @@ -473,7 +473,7 @@ def alloc2f(
dtype: Optional[DTypesFloat] = "float32",
zeros: bool = True,
) -> Floats2d:
return self.alloc((d0, d1), dtype=dtype, zeros=zeros)
return cast(Floats2d, self.alloc((d0, d1), dtype=dtype, zeros=zeros))

def alloc3f(
self,
Expand All @@ -484,7 +484,7 @@ def alloc3f(
dtype: Optional[DTypesFloat] = "float32",
zeros: bool = True,
) -> Floats3d:
return self.alloc((d0, d1, d2), dtype=dtype, zeros=zeros)
return cast(Floats3d, self.alloc((d0, d1, d2), dtype=dtype, zeros=zeros))

def alloc4f(
self,
Expand All @@ -496,7 +496,7 @@ def alloc4f(
dtype: Optional[DTypesFloat] = "float32",
zeros: bool = True,
) -> Floats4d:
return self.alloc((d0, d1, d2, d3), dtype=dtype, zeros=zeros)
return cast(Floats4d, self.alloc((d0, d1, d2, d3), dtype=dtype, zeros=zeros))

def alloc_f(
self,
Expand All @@ -505,7 +505,7 @@ def alloc_f(
dtype: Optional[DTypesFloat] = "float32",
zeros: bool = True,
) -> FloatsXd:
return self.alloc(shape, dtype=dtype, zeros=zeros)
return cast(FloatsXd, self.alloc(shape, dtype=dtype, zeros=zeros))

def alloc1i(
self,
Expand All @@ -514,7 +514,7 @@ def alloc1i(
dtype: Optional[DTypesInt] = "int32",
zeros: bool = True,
) -> Ints1d:
return self.alloc((d0,), dtype=dtype, zeros=zeros)
return cast(Ints1d, self.alloc((d0,), dtype=dtype, zeros=zeros))

def alloc2i(
self,
Expand All @@ -524,7 +524,7 @@ def alloc2i(
dtype: Optional[DTypesInt] = "int32",
zeros: bool = True,
) -> Ints2d:
return self.alloc((d0, d1), dtype=dtype, zeros=zeros)
return cast(Ints2d, self.alloc((d0, d1), dtype=dtype, zeros=zeros))

def alloc3i(
self,
Expand All @@ -535,7 +535,7 @@ def alloc3i(
dtype: Optional[DTypesInt] = "int32",
zeros: bool = True,
) -> Ints3d:
return self.alloc((d0, d1, d2), dtype=dtype, zeros=zeros)
return cast(Ints3d, self.alloc((d0, d1, d2), dtype=dtype, zeros=zeros))

def alloc4i(
self,
Expand All @@ -547,7 +547,7 @@ def alloc4i(
dtype: Optional[DTypesInt] = "int32",
zeros: bool = True,
) -> Ints4d:
return self.alloc((d0, d1, d2, d3), dtype=dtype, zeros=zeros)
return cast(Ints4d, self.alloc((d0, d1, d2, d3), dtype=dtype, zeros=zeros))

def alloc_i(
self,
Expand All @@ -556,15 +556,15 @@ def alloc_i(
dtype: Optional[DTypesInt] = "int32",
zeros: bool = True,
) -> IntsXd:
return self.alloc(shape, dtype=dtype, zeros=zeros)
return cast(IntsXd, self.alloc(shape, dtype=dtype, zeros=zeros))

def alloc(
self,
shape: Shape,
*,
dtype: Optional[DTypes] = "float32",
zeros: bool = True,
) -> ArrayT:
) -> ArrayXd:
"""Allocate an array of a certain shape."""
if isinstance(shape, int):
shape = (shape,)
Expand Down
2 changes: 1 addition & 1 deletion thinc/initializers.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ def configure_glorot_uniform_init() -> Callable[[Shape], FloatsXd]:


def zero_init(ops: Ops, shape: Shape) -> FloatsXd:
return ops.alloc(shape)
return ops.alloc_f(shape)


@registry.initializers("zero_init.v1")
Expand Down
2 changes: 1 addition & 1 deletion thinc/layers/hashembed.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@ def forward(
nV = vectors.shape[0]
nO = vectors.shape[1]
if len(ids) == 0:
output: Floats2d = model.ops.alloc((0, nO), dtype=vectors.dtype)
output: Floats2d = model.ops.alloc2f(0, nO, dtype=vectors.dtype)
else:
ids = model.ops.as_contig(ids, dtype="uint64")
nN = ids.shape[0]
Expand Down
2 changes: 1 addition & 1 deletion thinc/tests/layers/test_mxnet_wrapper.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def answer() -> int:
@pytest.fixture
def X(input_size: int) -> Array2d:
ops: Ops = get_current_ops()
return ops.alloc(shape=(1, input_size))
return cast(Array2d, ops.alloc(shape=(1, input_size)))


@pytest.fixture
Expand Down
4 changes: 2 additions & 2 deletions thinc/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,7 @@ def __bytes__(self) -> bytes: ...
def __str__(self) -> str: ...
def __repr__(self) -> str: ...
def __copy__(self, order: str = ...): ...
def __deepcopy__(self, memo: dict) -> ArrayT: ...
def __deepcopy__(self: SelfT, memo: dict) -> SelfT: ...
def __lt__(self, other): ...
def __le__(self, other): ...
def __eq__(self, other): ...
Expand Down Expand Up @@ -224,7 +224,7 @@ def clip(self, a_min: Any, a_max: Any, out: Optional[ArrayT]) -> ArrayT: ...
def max(self, axis: int = -1, out: Optional[ArrayT] = None) -> ArrayT: ...
# def mean(self, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[SelfT] = None, keepdims: bool = False) -> "Array": ...
def min(self, axis: int = -1, out: Optional[ArrayT] = None) -> ArrayT: ...
def nonzero(self) -> ArrayT: ...
def nonzero(self: SelfT) -> SelfT: ...
def prod(self, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[ArrayT] = None, keepdims: bool = False) -> ArrayT: ...
def round(self, decimals: int = 0, out: Optional[ArrayT] = None) -> ArrayT: ...
# def sum(self, axis: int = -1, dtype: Optional[DTypes] = None, out: Optional[ArrayT] = None, keepdims: bool = False) -> ArrayT: ...
Expand Down