From 0520731d9e24d9e47669d362da291974cd83ad24 Mon Sep 17 00:00:00 2001
From: Martin Hoyer
Date: Tue, 13 Aug 2024 19:20:36 +0200
Subject: [PATCH] pre-commit hooks update; mypy, ruff fixes (#3142)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Update pre-commit hooks, update Ruff, enable the flynt and refurb Ruff
rules, and address newly found errors.

Removing the explicit container_field overloads for ContainerClass and
ContainerInstance to have a unified return value. According to mypy, it
would never get to the second overload of container_field, as the first
one accepts the same or a broader set of arguments.

Replacing regex flag aliases with their full names to pass FURB167.

Removing one .readlines() usage for efficiency (FURB129).

The flynt f-strings check passes, ignoring the tests/unit dir.

Co-authored-by: Miloš Prchlík
---
 .pre-commit-config.yaml          | 14 +++++------
 docs/scripts/generate-plugins.py | 11 ++++-----
 docs/templates/plugins.rst.j2    |  2 +-
 pyproject.toml                   |  4 ++++
 tests/unit/test_utils.py         |  5 ++--
 tmt/convert.py                   | 40 ++++++++++++++++----------------
 tmt/export/__init__.py           |  2 +-
 tmt/export/nitrate.py            |  4 ++--
 tmt/log.py                       |  1 +
 tmt/steps/__init__.py            |  4 ++--
 tmt/steps/prepare/__init__.py    |  7 ++++--
 tmt/steps/prepare/distgit.py     |  4 ++--
 tmt/steps/provision/__init__.py  |  2 +-
 tmt/utils/__init__.py            | 39 ++++++++-----------------------
 14 files changed, 63 insertions(+), 76 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 001dd1d74b..f27a6ad9a1 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -2,7 +2,7 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
   - repo: https://github.com/hhatto/autopep8
-    rev: 'v2.3.0'
+    rev: 'v2.3.1'
     hooks:
       - id: autopep8
 
@@ -21,7 +21,7 @@ repos:
       - id: trailing-whitespace
 
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: "v1.10.0"
+    rev: "v1.11.1"
     hooks:
       - id: mypy
         language_version: "3.9"
@@ -67,7 +67,7 @@ repos:
         args: [--config-file=pyproject.toml]
 
   - repo: https://github.com/RobertCraigie/pyright-python
-    rev: v1.1.367
+    rev: v1.1.371
     hooks:
       - id: pyright
         language_version: "3.9"
@@ -110,7 +110,7 @@ repos:
           - "types-docutils"
 
   - repo: https://github.com/python-jsonschema/check-jsonschema
-    rev: "0.28.5"
+    rev: "0.29.1"
     hooks:
       - id: check-metaschema
        name: "Check JSON schemas validity"
@@ -123,7 +123,7 @@ repos:
         files: ^tmt/schemas/.*\.yaml
 
   - repo: https://github.com/ansible-community/ansible-lint.git
-    rev: v24.6.0
+    rev: v24.7.0
     hooks:
       - id: ansible-lint
         args:
@@ -139,7 +139,7 @@ repos:
         # in order to be parsed by ansible-lint
 
   - repo: https://github.com/charliermarsh/ruff-pre-commit
-    rev: v0.4.9
+    rev: v0.5.7
     hooks:
       - id: ruff
         args:
@@ -147,7 +147,7 @@ repos:
           - '--fix'
           - '--show-fixes'
 
   - repo: https://github.com/teemtee/tmt.git
-    rev: 1.33.0
+    rev: 1.35.0
     hooks:
       - id: tmt-lint
         additional_dependencies:
diff --git a/docs/scripts/generate-plugins.py b/docs/scripts/generate-plugins.py
index 25e6b97948..921949430d 100755
--- a/docs/scripts/generate-plugins.py
+++ b/docs/scripts/generate-plugins.py
@@ -44,10 +44,7 @@ def _is_ignored(
     if metadata.internal is True:
         return True
 
-    if hasattr(container, '_OPTIONLESS_FIELDS') and field.name in container._OPTIONLESS_FIELDS:
-        return True
-
-    return False
+    return hasattr(container, '_OPTIONLESS_FIELDS') and field.name in container._OPTIONLESS_FIELDS
 
 
 def _is_inherited(
@@ -67,7 +64,7 @@ def container_ignored_fields(container: ContainerClass) -> list[str]:
     field_names: list[str] = []
 
     for field in tmt.utils.container_fields(container):
-        _, _, _, metadata 
= tmt.utils.container_field(container, field.name) + _, _, _, _, metadata = tmt.utils.container_field(container, field.name) if _is_ignored(container, field, metadata): field_names.append(field.name) @@ -81,7 +78,7 @@ def container_inherited_fields(container: ContainerClass) -> list[str]: field_names: list[str] = [] for field in tmt.utils.container_fields(container): - _, _, _, metadata = tmt.utils.container_field(container, field.name) + _, _, _, _, metadata = tmt.utils.container_field(container, field.name) if _is_inherited(container, field, metadata): field_names.append(field.name) @@ -95,7 +92,7 @@ def container_intrinsic_fields(container: ContainerClass) -> list[str]: field_names: list[str] = [] for field in tmt.utils.container_fields(container): - _, _, _, metadata = tmt.utils.container_field(container, field.name) + _, _, _, _, metadata = tmt.utils.container_field(container, field.name) if _is_ignored(container, field, metadata): continue diff --git a/docs/templates/plugins.rst.j2 b/docs/templates/plugins.rst.j2 index a7f869b9a4..74137aa787 100644 --- a/docs/templates/plugins.rst.j2 +++ b/docs/templates/plugins.rst.j2 @@ -1,7 +1,7 @@ :tocdepth: 0 {% macro render_field(plugin_id, plugin_data_class, field_name) %} - {% set _, option, _, metadata = container_field(plugin_data_class, field_name) %} + {% set _, option, _, _, metadata = container_field(plugin_data_class, field_name) %} {% if metadata.metavar %} {{ option }}: ``{{ metadata.metavar }}`` diff --git a/pyproject.toml b/pyproject.toml index 85a6428e35..f8e206d314 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -318,6 +318,8 @@ lint.select = [ "PLC", # pylint-convention "PLE", # pylint-error "PLR", # pylint-refactor + "FLY", # flynt + "FURB", # refurb "RUF", # ruff "D", # pydocstyle ] @@ -337,6 +339,7 @@ lint.ignore = [ "S603", # `subprocess` call: check for execution of untrusted input "S607", # Starting a process with a partial executable path "S105", # Possible hardcoded password assigned to: "PASS" + "SIM103", # Return the condition directly - can hurt readability # pydocstyle # TODO: the permanent list (drop this comment once the temporary list @@ -372,6 +375,7 @@ lint.typing-modules = ["tmt._compat.typing"] "S605", # Starting a process with a shell: seems safe, but may be changed in the future "S318", # Using xml to parse untrusted data is known to be vulnerable to XML attacks "S108", # Probable insecure usage of temporary file or directory: "{}" + "FLY002", # Use f-string instead of .join ] # The purpose of tmt/_compat is to be used with TID251 (banned imports) "tmt/_compat/**.py" = ["TID251"] diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py index 5111f80d11..1e73fcff4c 100644 --- a/tests/unit/test_utils.py +++ b/tests/unit/test_utils.py @@ -779,8 +779,9 @@ def test_fedora_dist_git(tmppath): (tmppath / 'sources').write_text('SHA512 (fn-1.tar.gz) = 09af\n') (tmppath / 'tmt.spec').write_text('') fedora_sources_obj = tmt.utils.FedoraDistGit() - assert [("https://src.fedoraproject.org/repo/pkgs/rpms/tmt/fn-1.tar.gz/sha512/09af/fn-1.tar.gz", - "fn-1.tar.gz")] == fedora_sources_obj.url_and_name(cwd=tmppath) + assert fedora_sources_obj.url_and_name(cwd=tmppath) == [( + "https://src.fedoraproject.org/repo/pkgs/rpms/tmt/fn-1.tar.gz/sha512/09af/fn-1.tar.gz", + "fn-1.tar.gz")] class TestValidateGitStatus: diff --git a/tmt/convert.py b/tmt/convert.py index b40da61bd3..2b64c02e8e 100644 --- a/tmt/convert.py +++ b/tmt/convert.py @@ -83,8 +83,8 @@ def read_manual( new_cwd.mkdir(exist_ok=True) os.chdir(new_cwd) - for 
case_id in case_ids: - testcase = nitrate.TestCase(case_id) + for cid in case_ids: + testcase = nitrate.TestCase(cid) if testcase.status.name != 'CONFIRMED' and not disabled: log.debug(f'{testcase.identifier} skipped (testcase is not CONFIRMED).') continue @@ -228,7 +228,7 @@ def read_datafile( testinfo = datafile # Beaker task name - search_result = re.search(regex_task, testinfo, re.M) + search_result = re.search(regex_task, testinfo, re.MULTILINE) if search_result is None: raise ConvertError("Unable to parse 'Name' from testinfo.desc.") beaker_task = search_result.group(1).strip() @@ -237,13 +237,13 @@ def read_datafile( data['extra-summary'] = beaker_task # Summary - search_result = re.search(regex_summary, testinfo, re.M) + search_result = re.search(regex_summary, testinfo, re.MULTILINE) if search_result is not None: data['summary'] = search_result.group(1).strip() echo(style('summary: ', fg='green') + data['summary']) # Test script - search_result = re.search(regex_test, datafile_test, re.M) + search_result = re.search(regex_test, datafile_test, re.MULTILINE) if search_result is None: if filename == 'metadata': # entry_point property is optional. When absent 'make run' is used. @@ -271,7 +271,7 @@ def read_datafile( with open(makefile_path, encoding='utf-8') as makefile_file: makefile = makefile_file.read() search_result = \ - re.search(makefile_regex_test, makefile, re.M) + re.search(makefile_regex_test, makefile, re.MULTILINE) except OSError: raise ConvertError("Makefile is missing.") # Retrieve the path to the test file from the Makefile @@ -280,7 +280,7 @@ def read_datafile( # Read the test file and determine the framework used. if test_path: with open(test_path, encoding="utf-8") as test_file: - if re.search("beakerlib", test_file.read(), re.M): + if re.search("beakerlib", test_file.read(), re.MULTILINE): data["framework"] = "beakerlib" else: data["framework"] = "shell" @@ -291,28 +291,28 @@ def read_datafile( raise ConvertError(f"Unable to open '{test_path}'.") # Contact - search_result = re.search(regex_contact, testinfo, re.M) + search_result = re.search(regex_contact, testinfo, re.MULTILINE) if search_result is not None: data['contact'] = search_result.group(1).strip() echo(style('contact: ', fg='green') + data['contact']) if filename == 'Makefile': # Component - search_result = re.search(r'^RunFor:[ \t]*(.*)$', testinfo, re.M) + search_result = re.search(r'^RunFor:[ \t]*(.*)$', testinfo, re.MULTILINE) if search_result is not None: data['component'] = search_result.group(1).split() echo(style('component: ', fg='green') + ' '.join(data['component'])) # Duration - search_result = re.search(regex_duration, testinfo, re.M) + search_result = re.search(regex_duration, testinfo, re.MULTILINE) if search_result is not None: data['duration'] = search_result.group(1).strip() echo(style('duration: ', fg='green') + data['duration']) if filename == 'Makefile': # Environment - variables = re.findall(r'^Environment:[ \t]*(.*)$', testinfo, re.M) + variables = re.findall(r'^Environment:[ \t]*(.*)$', testinfo, re.MULTILINE) if variables: data['environment'] = {} for variable in variables: @@ -334,7 +334,7 @@ def sanitize_name(name: str) -> str: return name # RhtsRequires or repoRequires (optional) goes to require - requires = re.findall(regex_require, testinfo, re.M) + requires = re.findall(regex_require, testinfo, re.MULTILINE) if requires: data['require'] = [ sanitize_name(require.strip()) for line in requires @@ -342,7 +342,7 @@ def sanitize_name(name: str) -> str: echo(style('require: ', 
fg='green') + ' '.join(data['require'])) # Requires or softDependencies (optional) goes to recommend - recommends = re.findall(regex_recommend, testinfo, re.M) + recommends = re.findall(regex_recommend, testinfo, re.MULTILINE) if recommends: data['recommend'] = [ sanitize_name(recommend.strip()) for line in recommends @@ -352,7 +352,7 @@ def sanitize_name(name: str) -> str: if filename == 'Makefile': # Convert Type into tags - search_result = re.search(r'^Type:[ \t]*(.*)$', testinfo, re.M) + search_result = re.search(r'^Type:[ \t]*(.*)$', testinfo, re.MULTILINE) if search_result is not None: makefile_type = search_result.group(1).strip() if 'all' in [type_.lower() for type_ in types]: @@ -364,7 +364,7 @@ def sanitize_name(name: str) -> str: echo(style("tag: ", fg="green") + " ".join(tags)) data["tag"] = tags # Add relevant bugs to the 'link' attribute - for bug_line in re.findall(r'^Bug:\s*([0-9\s]+)', testinfo, re.M): + for bug_line in re.findall(r'^Bug:\s*([0-9\s]+)', testinfo, re.MULTILINE): for bug in re.findall(r'(\d+)', bug_line): add_link(bug, data, SYSTEM_BUGZILLA) @@ -519,7 +519,7 @@ def target_content_run() -> list[str]: if '\\\n' in datafile: datafile_test = re.sub(r'\\\n', newline_stub, datafile) regexp = r'^run:.*\n\t(.*)$' - search_result = re.search(regexp, datafile_test, re.M) + search_result = re.search(regexp, datafile_test, re.MULTILINE) if search_result is None: # Target not found in the Makefile return [] @@ -533,7 +533,7 @@ def target_content_run() -> list[str]: def target_content_build() -> list[str]: """ Extract lines from the build content """ regexp = r'^build:.*\n((?:\t[^\n]*\n?)*)' - search_result = re.search(regexp, datafile, re.M) + search_result = re.search(regexp, datafile, re.MULTILINE) if search_result is None: # Target not found in the Makefile return [] @@ -760,7 +760,7 @@ def read_tier(tag: str, data: NitrateDataType) -> None: Check for the tier attribute, if there are multiple TierX tags, pick the one with the lowest index. 
""" - tier_match = re.match(r'^Tier ?(?P\d+)$', tag, re.I) + tier_match = re.match(r'^Tier ?(?P\d+)$', tag, re.IGNORECASE) if tier_match: num = tier_match.group('num') if 'tier' in data: @@ -984,12 +984,12 @@ def extract_relevancy( return None # Fallback to the original relevancy syntax # The relevancy definition begins with the header - matched = re.search(RELEVANCY_LEGACY_HEADER, notes, re.I + re.M + re.S) + matched = re.search(RELEVANCY_LEGACY_HEADER, notes, re.IGNORECASE + re.MULTILINE + re.DOTALL) if not matched: return None relevancy = matched.groups()[0] # Remove possible additional text after an empty line - matched = re.search(r"(.*?)\n\s*\n.*", relevancy, re.S) + matched = re.search(r"(.*?)\n\s*\n.*", relevancy, re.DOTALL) if matched: relevancy = matched.groups()[0] return relevancy.strip() diff --git a/tmt/export/__init__.py b/tmt/export/__init__.py index 3820668018..0c2c74d5a4 100644 --- a/tmt/export/__init__.py +++ b/tmt/export/__init__.py @@ -379,7 +379,7 @@ def check_md_file_respects_spec(md_path: Path) -> list[str]: html_headings_from_file = [i[0] for i in re.findall('(^(.+?)$)', md_to_html, - re.M)] + re.MULTILINE)] # No invalid headings in the file w/o headings if not html_headings_from_file: diff --git a/tmt/export/nitrate.py b/tmt/export/nitrate.py index 862c6e6334..981d45967e 100644 --- a/tmt/export/nitrate.py +++ b/tmt/export/nitrate.py @@ -263,7 +263,7 @@ def return_markdown_file() -> Optional[Path]: """ Return path to the markdown file """ files = '\n'.join(os.listdir()) reg_exp = r'.+\.md$' - md_files = re.findall(reg_exp, files, re.M) + md_files = re.findall(reg_exp, files, re.MULTILINE) fail_message = ("in the current working directory.\n" "Manual steps couldn't be exported") if len(md_files) == 1: @@ -285,7 +285,7 @@ def get_category(path: Path) -> str: with open(path / 'Makefile', encoding='utf-8') as makefile_file: makefile = makefile_file.read() category_search = re.search( - r'echo\s+"Type:\s*(.*)"', makefile, re.M) + r'echo\s+"Type:\s*(.*)"', makefile, re.MULTILINE) if category_search: category = category_search.group(1) # Default to 'Sanity' if Makefile or Type not found diff --git a/tmt/log.py b/tmt/log.py index 28d6328dd5..66db71c516 100644 --- a/tmt/log.py +++ b/tmt/log.py @@ -75,6 +75,7 @@ class Topic(enum.Enum): LoggableValue = Union[ str, + dict[str, Any], int, bool, float, diff --git a/tmt/steps/__init__.py b/tmt/steps/__init__.py index d004894242..37b3497f16 100644 --- a/tmt/steps/__init__.py +++ b/tmt/steps/__init__.py @@ -1017,7 +1017,7 @@ def _patch_raw_datum( data_base = self._plugin_base_class._data_class debug3('compatible base', f'{data_base.__module__}.{data_base.__name__}') - debug3('compatible keys', list(data_base.keys())) + debug3('compatible keys', ', '.join(k for k in data_base.keys())) # noqa: SIM118 # Copy compatible keys only, ignore everything else # SIM118: Use `{key} in {dict}` instead of `{key} in {dict}.keys()`. 
@@ -1313,7 +1313,7 @@ def options(cls, how: Optional[str] = None) -> list[tmt.options.ClickOptionDecor # Include common options supported across all plugins return [ metadata.option - for _, _, _, metadata in ( + for _, _, _, _, metadata in ( container_field(cls._data_class, key) for key in container_keys(cls._data_class) ) diff --git a/tmt/steps/prepare/__init__.py b/tmt/steps/prepare/__init__.py index 3ec2080249..1665aba979 100644 --- a/tmt/steps/prepare/__init__.py +++ b/tmt/steps/prepare/__init__.py @@ -187,7 +187,8 @@ class DependencyCollection: @property def as_key(self) -> frozenset['tmt.base.DependencySimple']: - return frozenset(collection.dependencies) + # ignore[attr-defined]: mypy doesn't seem to understand collection as `frozenset` + return frozenset(collection.dependencies) # type: ignore[has-type] # All phases from all steps. phases = [ @@ -223,7 +224,9 @@ def as_key(self) -> frozenset['tmt.base.DependencySimple']: continue collected_requires[guest].dependencies += tmt.base.assert_simple_dependencies( - phase.essential_requires(), + # ignore[attr-defined]: mypy thinks that phase is Phase type, while its + # actually PluginClass + phase.essential_requires(), # type: ignore[attr-defined] 'After beakerlib processing, tests may have only simple requirements', self._logger) diff --git a/tmt/steps/prepare/distgit.py b/tmt/steps/prepare/distgit.py index b0d471aeab..4c9881b406 100644 --- a/tmt/steps/prepare/distgit.py +++ b/tmt/steps/prepare/distgit.py @@ -287,11 +287,11 @@ def go( self.discover.post_dist_git(created_content) # FIXME needs refactor of Prepare, tmt.base etc... # doing quick & dirty injection of prepareinstalls - for guest in self.step.plan.provision.guests(): + for g in self.step.plan.provision.guests(): collected_requires: list[tmt.base.DependencySimple] = [] collected_recommends: list[tmt.base.DependencySimple] = [] for test in self.step.plan.discover.tests(enabled=True): - if not test.enabled_on_guest(guest): + if not test.enabled_on_guest(g): continue collected_requires += tmt.base.assert_simple_dependencies( diff --git a/tmt/steps/provision/__init__.py b/tmt/steps/provision/__init__.py index 7443daf599..7a99b572ae 100644 --- a/tmt/steps/provision/__init__.py +++ b/tmt/steps/provision/__init__.py @@ -946,7 +946,7 @@ def _ansible_summary(self, output: Optional[str]) -> None: return keys = 'ok changed unreachable failed skipped rescued ignored'.split() for key in keys: - matched = re.search(rf'^.*\s:\s.*{key}=(\d+).*$', output, re.M) + matched = re.search(rf'^.*\s:\s.*{key}=(\d+).*$', output, re.MULTILINE) if matched and int(matched.group(1)) > 0: tasks = fmf.utils.listed(matched.group(1), 'task') self.verbose(key, tasks, 'green') diff --git a/tmt/utils/__init__.py b/tmt/utils/__init__.py index cdce54bd8b..6548ff30d6 100644 --- a/tmt/utils/__init__.py +++ b/tmt/utils/__init__.py @@ -2157,8 +2157,7 @@ class _MultiInvokableCommonMeta(_CommonMeta): that cannot be shared among classes. 
""" - # N805: ruff does not recognize this as a metaclass, `cls` is correct - def __init__(cls, *args: Any, **kwargs: Any) -> None: # noqa: N805 + def __init__(cls, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) cls.cli_invocations: list[tmt.cli.CliInvocation] = [] @@ -2929,25 +2928,9 @@ def container_items(container: ContainerInstance) -> Iterator[tuple[str, Any]]: yield field.name, container.__dict__[field.name] -@overload -def container_field( - container: ContainerClass, - key: str) -> tuple[str, str, dataclasses.Field[Any], 'FieldMetadata[Any]']: - pass - - -@overload -def container_field( - container: ContainerInstance, - key: str) -> tuple[str, str, Any, dataclasses.Field[Any], 'FieldMetadata[Any]']: - pass - - def container_field( container: Container, - key: str) -> Union[ - tuple[str, str, dataclasses.Field[Any], 'FieldMetadata[Any]'], - tuple[str, str, Any, dataclasses.Field[Any], 'FieldMetadata[Any]']]: + key: str) -> tuple[str, str, Any, dataclasses.Field[Any], 'FieldMetadata[Any]']: """ Return a dataclass/data container field info by the field's name. @@ -2965,14 +2948,12 @@ def container_field( continue metadata = field.metadata.get('tmt', FieldMetadata()) - if inspect.isclass(container): - return field.name, key_to_option(field.name), field, metadata - return ( field.name, key_to_option(field.name), - container.__dict__[field.name], - field, metadata) + container.__dict__[field.name] if not inspect.isclass(container) else None, + field, + metadata) if isinstance(container, DataContainer): raise GeneralError( @@ -3236,7 +3217,7 @@ def _produce_unserialized() -> Iterator[tuple[str, Any]]: for option, value in serialized.items(): key = option_to_key(option) - _, _, _, metadata = container_field(cls, key) + _, _, _, _, metadata = container_field(cls, key) if metadata.unserialize_callback: yield key, metadata.unserialize_callback(value) @@ -5215,7 +5196,7 @@ def url_and_name(self, cwd: Optional[Path] = None) -> list[tuple[str, str]]: ret_values = [] try: with open(cwd / self.sources_file_name) as f: - for line in f.readlines(): + for line in f: match = self.re_source.match(line) if match is None: raise GeneralError( @@ -5453,7 +5434,7 @@ def __init__( self._previous_line: Optional[str] = None - def __enter__(self: 'Self') -> 'Self': + def __enter__(self) -> 'Self': return self def __exit__(self, *args: object) -> None: @@ -5977,7 +5958,7 @@ def dataclass_normalize_field( value = raw_value if dataclasses.is_dataclass(container): - _, _, _, metadata = container_field(type(container), keyname) + _, _, _, _, metadata = container_field(type(container), keyname) if metadata.normalize_callback: value = metadata.normalize_callback(key_address, raw_value, logger) @@ -5988,7 +5969,7 @@ def dataclass_normalize_field( # test. # # Keep for debugging purposes, as long as normalization settles down. - if value is None or value == [] or value == (): + if not value: logger.debug( f'field "{key_address}" normalized to false-ish value', f'{container.__class__.__name__}.{keyname}',