pre-commit hooks update; mypy, ruff fixes #3142

Merged 10 commits on Aug 13, 2024
14 changes: 7 additions & 7 deletions .pre-commit-config.yaml
@@ -2,7 +2,7 @@
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/hhatto/autopep8
rev: 'v2.3.0'
rev: 'v2.3.1'
hooks:
- id: autopep8

@@ -21,7 +21,7 @@ repos:
- id: trailing-whitespace

- repo: https://github.com/pre-commit/mirrors-mypy
rev: "v1.10.0"
rev: "v1.11.1"
hooks:
- id: mypy
language_version: "3.9"
@@ -67,7 +67,7 @@ repos:
args: [--config-file=pyproject.toml]

- repo: https://github.com/RobertCraigie/pyright-python
rev: v1.1.367
rev: v1.1.371
hooks:
- id: pyright
language_version: "3.9"
@@ -110,7 +110,7 @@ repos:
- "types-docutils"

- repo: https://github.com/python-jsonschema/check-jsonschema
rev: "0.28.5"
rev: "0.29.1"
hooks:
- id: check-metaschema
name: "Check JSON schemas validity"
@@ -123,7 +123,7 @@
files: ^tmt/schemas/.*\.yaml

- repo: https://github.com/ansible-community/ansible-lint.git
rev: v24.6.0
rev: v24.7.0
hooks:
- id: ansible-lint
args:
@@ -139,15 +139,15 @@
# in order to be parsed by ansible-lint

- repo: https://github.com/charliermarsh/ruff-pre-commit
rev: v0.4.9
rev: v0.5.7
hooks:
- id: ruff
args:
- '--fix'
- '--show-fixes'

- repo: https://github.com/teemtee/tmt.git
rev: 1.33.0
rev: 1.35.0
hooks:
- id: tmt-lint
additional_dependencies:
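All seven `rev:` bumps above are the kind of mechanical update that `pre-commit autoupdate` produces. A minimal sketch for auditing the pinned revisions after such an update (assumes PyYAML is installed and the standard pre-commit config schema):

import yaml  # PyYAML; assumed available in the dev environment

with open('.pre-commit-config.yaml', encoding='utf-8') as config_file:
    config = yaml.safe_load(config_file)

# Every entry under 'repos' pins one hook repository to a 'rev'.
for repo in config['repos']:
    print(f"{repo['repo']}: {repo.get('rev', '<unpinned>')}")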
11 changes: 4 additions & 7 deletions docs/scripts/generate-plugins.py
@@ -44,10 +44,7 @@ def _is_ignored(
if metadata.internal is True:
return True

if hasattr(container, '_OPTIONLESS_FIELDS') and field.name in container._OPTIONLESS_FIELDS:
return True

return False
return hasattr(container, '_OPTIONLESS_FIELDS') and field.name in container._OPTIONLESS_FIELDS


def _is_inherited(
@@ -67,7 +64,7 @@ def container_ignored_fields(container: ContainerClass) -> list[str]:
field_names: list[str] = []

for field in tmt.utils.container_fields(container):
_, _, _, metadata = tmt.utils.container_field(container, field.name)
_, _, _, _, metadata = tmt.utils.container_field(container, field.name)

if _is_ignored(container, field, metadata):
field_names.append(field.name)
@@ -81,7 +78,7 @@ def container_inherited_fields(container: ContainerClass) -> list[str]:
field_names: list[str] = []

for field in tmt.utils.container_fields(container):
_, _, _, metadata = tmt.utils.container_field(container, field.name)
_, _, _, _, metadata = tmt.utils.container_field(container, field.name)

if _is_inherited(container, field, metadata):
field_names.append(field.name)
@@ -95,7 +92,7 @@ def container_intrinsic_fields(container: ContainerClass) -> list[str]:
field_names: list[str] = []

for field in tmt.utils.container_fields(container):
_, _, _, metadata = tmt.utils.container_field(container, field.name)
_, _, _, _, metadata = tmt.utils.container_field(container, field.name)

if _is_ignored(container, field, metadata):
continue
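Two independent fixes meet in this file: the widened `_, _, _, _, metadata` unpacking tracks an extra element now returned by `tmt.utils.container_field()` (only `metadata` is used here, so the remaining positions are discarded with `_`; the same five-element unpacking reappears in the Jinja template and plugin option listing below), and `_is_ignored` is collapsed into a single boolean return, the rewrite suggested by ruff's SIM103 rule. A minimal sketch of that pattern, with illustrative names:

def is_optionless(container: object, field_name: str) -> bool:
    # SIM103: return the condition directly instead of the
    # `if ...: return True` / `return False` ladder; getattr with a
    # default keeps the lookup safe when the attribute is absent.
    return field_name in getattr(container, '_OPTIONLESS_FIELDS', ())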
2 changes: 1 addition & 1 deletion docs/templates/plugins.rst.j2
@@ -1,7 +1,7 @@
:tocdepth: 0

{% macro render_field(plugin_id, plugin_data_class, field_name) %}
{% set _, option, _, metadata = container_field(plugin_data_class, field_name) %}
{% set _, option, _, _, metadata = container_field(plugin_data_class, field_name) %}

{% if metadata.metavar %}
{{ option }}: ``{{ metadata.metavar }}``
4 changes: 4 additions & 0 deletions pyproject.toml
@@ -318,6 +318,8 @@ lint.select = [
"PLC", # pylint-convention
"PLE", # pylint-error
"PLR", # pylint-refactor
"FLY", # flynt
"FURB", # refurb
"RUF", # ruff
"D", # pydocstyle
]
@@ -337,6 +339,7 @@ lint.ignore = [
"S603", # `subprocess` call: check for execution of untrusted input
"S607", # Starting a process with a partial executable path
"S105", # Possible hardcoded password assigned to: "PASS"
"SIM103", # Return the condition directly - can hurt readability

# pydocstyle
# TODO: the permanent list (drop this comment once the temporary list
@@ -372,6 +375,7 @@ lint.typing-modules = ["tmt._compat.typing"]
"S605", # Starting a process with a shell: seems safe, but may be changed in the future
"S318", # Using xml to parse untrusted data is known to be vulnerable to XML attacks
"S108", # Probable insecure usage of temporary file or directory: "{}"
"FLY002", # Use f-string instead of .join
]
# The purpose of tmt/_compat is to be used with TID251 (banned imports)
"tmt/_compat/**.py" = ["TID251"]
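The newly selected rule families come with two targeted ignores: SIM103 (the pattern shown above for generate-plugins.py) and FLY002 from flynt, which rewrites static `str.join()` calls over literal sequences into f-strings. An illustrative finding, not taken from this diff:

name, version = 'tmt', '1.35.0'

legacy = '-'.join([name, version])  # FLY002 flags this static join
modern = f'{name}-{version}'        # the f-string rewrite ruff would apply

assert legacy == modern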
5 changes: 3 additions & 2 deletions tests/unit/test_utils.py
@@ -779,8 +779,9 @@ def test_fedora_dist_git(tmppath):
(tmppath / 'sources').write_text('SHA512 (fn-1.tar.gz) = 09af\n')
(tmppath / 'tmt.spec').write_text('')
fedora_sources_obj = tmt.utils.FedoraDistGit()
assert [("https://src.fedoraproject.org/repo/pkgs/rpms/tmt/fn-1.tar.gz/sha512/09af/fn-1.tar.gz",
"fn-1.tar.gz")] == fedora_sources_obj.url_and_name(cwd=tmppath)
assert fedora_sources_obj.url_and_name(cwd=tmppath) == [(
"https://src.fedoraproject.org/repo/pkgs/rpms/tmt/fn-1.tar.gz/sha512/09af/fn-1.tar.gz",
"fn-1.tar.gz")]


class TestValidateGitStatus:
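The assertion rewrite only swaps the operands into the conventional `actual == expected` order and reflows the long URL literal; the test's behaviour is unchanged.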
40 changes: 20 additions & 20 deletions tmt/convert.py
@@ -83,8 +83,8 @@ def read_manual(
new_cwd.mkdir(exist_ok=True)
os.chdir(new_cwd)

for case_id in case_ids:
testcase = nitrate.TestCase(case_id)
for cid in case_ids:
testcase = nitrate.TestCase(cid)
if testcase.status.name != 'CONFIRMED' and not disabled:
log.debug(f'{testcase.identifier} skipped (testcase is not CONFIRMED).')
continue
@@ -228,7 +228,7 @@ def read_datafile(
testinfo = datafile

# Beaker task name
search_result = re.search(regex_task, testinfo, re.M)
search_result = re.search(regex_task, testinfo, re.MULTILINE)
if search_result is None:
raise ConvertError("Unable to parse 'Name' from testinfo.desc.")
beaker_task = search_result.group(1).strip()
@@ -237,13 +237,13 @@
data['extra-summary'] = beaker_task

# Summary
search_result = re.search(regex_summary, testinfo, re.M)
search_result = re.search(regex_summary, testinfo, re.MULTILINE)
if search_result is not None:
data['summary'] = search_result.group(1).strip()
echo(style('summary: ', fg='green') + data['summary'])

# Test script
search_result = re.search(regex_test, datafile_test, re.M)
search_result = re.search(regex_test, datafile_test, re.MULTILINE)
if search_result is None:
if filename == 'metadata':
# entry_point property is optional. When absent 'make run' is used.
@@ -271,7 +271,7 @@ def read_datafile(
with open(makefile_path, encoding='utf-8') as makefile_file:
makefile = makefile_file.read()
search_result = \
re.search(makefile_regex_test, makefile, re.M)
re.search(makefile_regex_test, makefile, re.MULTILINE)
except OSError:
raise ConvertError("Makefile is missing.")
# Retrieve the path to the test file from the Makefile
@@ -280,7 +280,7 @@
# Read the test file and determine the framework used.
if test_path:
with open(test_path, encoding="utf-8") as test_file:
if re.search("beakerlib", test_file.read(), re.M):
if re.search("beakerlib", test_file.read(), re.MULTILINE):
data["framework"] = "beakerlib"
else:
data["framework"] = "shell"
@@ -291,28 +291,28 @@
raise ConvertError(f"Unable to open '{test_path}'.")

# Contact
search_result = re.search(regex_contact, testinfo, re.M)
search_result = re.search(regex_contact, testinfo, re.MULTILINE)
if search_result is not None:
data['contact'] = search_result.group(1).strip()
echo(style('contact: ', fg='green') + data['contact'])

if filename == 'Makefile':
# Component
search_result = re.search(r'^RunFor:[ \t]*(.*)$', testinfo, re.M)
search_result = re.search(r'^RunFor:[ \t]*(.*)$', testinfo, re.MULTILINE)
if search_result is not None:
data['component'] = search_result.group(1).split()
echo(style('component: ', fg='green') +
' '.join(data['component']))

# Duration
search_result = re.search(regex_duration, testinfo, re.M)
search_result = re.search(regex_duration, testinfo, re.MULTILINE)
if search_result is not None:
data['duration'] = search_result.group(1).strip()
echo(style('duration: ', fg='green') + data['duration'])

if filename == 'Makefile':
# Environment
variables = re.findall(r'^Environment:[ \t]*(.*)$', testinfo, re.M)
variables = re.findall(r'^Environment:[ \t]*(.*)$', testinfo, re.MULTILINE)
if variables:
data['environment'] = {}
for variable in variables:
@@ -334,15 +334,15 @@ def sanitize_name(name: str) -> str:
return name

# RhtsRequires or repoRequires (optional) goes to require
requires = re.findall(regex_require, testinfo, re.M)
requires = re.findall(regex_require, testinfo, re.MULTILINE)
if requires:
data['require'] = [
sanitize_name(require.strip()) for line in requires
for require in line.split(rec_separator)]
echo(style('require: ', fg='green') + ' '.join(data['require']))

# Requires or softDependencies (optional) goes to recommend
recommends = re.findall(regex_recommend, testinfo, re.M)
recommends = re.findall(regex_recommend, testinfo, re.MULTILINE)
if recommends:
data['recommend'] = [
sanitize_name(recommend.strip()) for line in recommends
@@ -352,7 +352,7 @@ def sanitize_name(name: str) -> str:

if filename == 'Makefile':
# Convert Type into tags
search_result = re.search(r'^Type:[ \t]*(.*)$', testinfo, re.M)
search_result = re.search(r'^Type:[ \t]*(.*)$', testinfo, re.MULTILINE)
if search_result is not None:
makefile_type = search_result.group(1).strip()
if 'all' in [type_.lower() for type_ in types]:
@@ -364,7 +364,7 @@ def sanitize_name(name: str) -> str:
echo(style("tag: ", fg="green") + " ".join(tags))
data["tag"] = tags
# Add relevant bugs to the 'link' attribute
for bug_line in re.findall(r'^Bug:\s*([0-9\s]+)', testinfo, re.M):
for bug_line in re.findall(r'^Bug:\s*([0-9\s]+)', testinfo, re.MULTILINE):
for bug in re.findall(r'(\d+)', bug_line):
add_link(bug, data, SYSTEM_BUGZILLA)

@@ -519,7 +519,7 @@ def target_content_run() -> list[str]:
if '\\\n' in datafile:
datafile_test = re.sub(r'\\\n', newline_stub, datafile)
regexp = r'^run:.*\n\t(.*)$'
search_result = re.search(regexp, datafile_test, re.M)
search_result = re.search(regexp, datafile_test, re.MULTILINE)
if search_result is None:
# Target not found in the Makefile
return []
@@ -533,7 +533,7 @@
def target_content_build() -> list[str]:
""" Extract lines from the build content """
regexp = r'^build:.*\n((?:\t[^\n]*\n?)*)'
search_result = re.search(regexp, datafile, re.M)
search_result = re.search(regexp, datafile, re.MULTILINE)
if search_result is None:
# Target not found in the Makefile
return []
@@ -760,7 +760,7 @@ def read_tier(tag: str, data: NitrateDataType) -> None:
Check for the tier attribute, if there are multiple TierX tags, pick
the one with the lowest index.
"""
tier_match = re.match(r'^Tier ?(?P<num>\d+)$', tag, re.I)
tier_match = re.match(r'^Tier ?(?P<num>\d+)$', tag, re.IGNORECASE)
if tier_match:
num = tier_match.group('num')
if 'tier' in data:
@@ -984,12 +984,12 @@ def extract_relevancy(
return None
# Fallback to the original relevancy syntax
# The relevancy definition begins with the header
matched = re.search(RELEVANCY_LEGACY_HEADER, notes, re.I + re.M + re.S)
matched = re.search(RELEVANCY_LEGACY_HEADER, notes, re.IGNORECASE + re.MULTILINE + re.DOTALL)
if not matched:
return None
relevancy = matched.groups()[0]
# Remove possible additional text after an empty line
matched = re.search(r"(.*?)\n\s*\n.*", relevancy, re.S)
matched = re.search(r"(.*?)\n\s*\n.*", relevancy, re.DOTALL)
if matched:
relevancy = matched.groups()[0]
return relevancy.strip()
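The bulk of this file is a mechanical substitution: `re.M`, `re.I` and `re.S` are documented aliases of `re.MULTILINE`, `re.IGNORECASE` and `re.DOTALL`, so spelling them out changes nothing at runtime; the same substitution continues through tmt/export below. A quick sanity check, plus the conventional way to combine flags:

import re

# The short names are the very same flag objects as the long ones.
assert re.M is re.MULTILINE
assert re.I is re.IGNORECASE
assert re.S is re.DOTALL

# Flags are IntFlag values; `|` is the idiomatic combinator, although
# `+` also works for distinct flags (as in the extract_relevancy hunk).
pattern = re.compile(r'^tier ?(?P<num>\d+)$', re.IGNORECASE | re.MULTILINE)
assert pattern.match('Tier 1') is not None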
2 changes: 1 addition & 1 deletion tmt/export/__init__.py
@@ -379,7 +379,7 @@ def check_md_file_respects_spec(md_path: Path) -> list[str]:
html_headings_from_file = [i[0] for i in
re.findall('(^<h[1-4]>(.+?)</h[1-4]>$)',
md_to_html,
re.M)]
re.MULTILINE)]

# No invalid headings in the file w/o headings
if not html_headings_from_file:
4 changes: 2 additions & 2 deletions tmt/export/nitrate.py
@@ -263,7 +263,7 @@ def return_markdown_file() -> Optional[Path]:
""" Return path to the markdown file """
files = '\n'.join(os.listdir())
reg_exp = r'.+\.md$'
md_files = re.findall(reg_exp, files, re.M)
md_files = re.findall(reg_exp, files, re.MULTILINE)
fail_message = ("in the current working directory.\n"
"Manual steps couldn't be exported")
if len(md_files) == 1:
@@ -285,7 +285,7 @@ def get_category(path: Path) -> str:
with open(path / 'Makefile', encoding='utf-8') as makefile_file:
makefile = makefile_file.read()
category_search = re.search(
r'echo\s+"Type:\s*(.*)"', makefile, re.M)
r'echo\s+"Type:\s*(.*)"', makefile, re.MULTILINE)
if category_search:
category = category_search.group(1)
# Default to 'Sanity' if Makefile or Type not found
1 change: 1 addition & 0 deletions tmt/log.py
@@ -75,6 +75,7 @@ class Topic(enum.Enum):

LoggableValue = Union[
str,
dict[str, Any],
int,
bool,
float,
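Adding `dict[str, Any]` to the union lets callers hand dictionaries straight to the logging helpers. A hedged sketch of the widened alias with a hypothetical consumer (only the union members visible in this hunk are shown):

from typing import Any, Union

LoggableValue = Union[str, dict[str, Any], int, bool, float]

def render_value(value: LoggableValue) -> str:
    # Hypothetical helper: dictionaries render as key=value pairs,
    # everything else falls back to str().
    if isinstance(value, dict):
        return ', '.join(f'{key}={item}' for key, item in value.items())
    return str(value)

print(render_value({'how': 'shell', 'order': 50}))  # how=shell, order=50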
4 changes: 2 additions & 2 deletions tmt/steps/__init__.py
@@ -1017,7 +1017,7 @@ def _patch_raw_datum(
data_base = self._plugin_base_class._data_class

debug3('compatible base', f'{data_base.__module__}.{data_base.__name__}')
debug3('compatible keys', list(data_base.keys()))
debug3('compatible keys', ', '.join(k for k in data_base.keys())) # noqa: SIM118

# Copy compatible keys only, ignore everything else
# SIM118: Use `{key} in {dict}` instead of `{key} in {dict}.keys()`.
@@ -1313,7 +1313,7 @@ def options(cls, how: Optional[str] = None) -> list[tmt.options.ClickOptionDecorator]:
# Include common options supported across all plugins
return [
metadata.option
for _, _, _, metadata in (
for _, _, _, _, metadata in (
container_field(cls._data_class, key)
for key in container_keys(cls._data_class)
)
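SIM118 normally rewrites `k in d.keys()` and iteration over `.keys()` into operations on the mapping itself, which are equivalent; the `noqa` above keeps the explicit `.keys()` deliberately. The five-element unpacking in `options()` mirrors the `container_field()` change seen earlier. A small illustration of the SIM118 equivalence:

data = {'how': 'shell', 'script': 'ls'}

joined_direct = ', '.join(data)        # what SIM118 recommends
joined_keys = ', '.join(data.keys())   # what it flags (kept above via noqa)

assert joined_direct == joined_keys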
7 changes: 5 additions & 2 deletions tmt/steps/prepare/__init__.py
@@ -187,7 +187,8 @@ class DependencyCollection:

@property
def as_key(self) -> frozenset['tmt.base.DependencySimple']:
return frozenset(collection.dependencies)
# ignore[attr-defined]: mypy doesn't seem to understand collection as `frozenset`
return frozenset(collection.dependencies) # type: ignore[has-type]

# All phases from all steps.
phases = [
@@ -223,7 +224,9 @@ def as_key(self) -> frozenset['tmt.base.DependencySimple']:
continue

collected_requires[guest].dependencies += tmt.base.assert_simple_dependencies(
phase.essential_requires(),
# ignore[attr-defined]: mypy thinks that phase is Phase type, while it's
# actually PluginClass
phase.essential_requires(), # type: ignore[attr-defined]
'After beakerlib processing, tests may have only simple requirements',
self._logger)

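Both suppressions pin a single mypy error code rather than using a bare `# type: ignore`, so unrelated errors on the same lines still surface. A sketch of the second case, with hypothetical stand-in classes:

class Phase:
    """Stand-in for the base type mypy infers for `phase`."""

class PluginPhase(Phase):
    def essential_requires(self) -> list[str]:
        return []

def collect(phase: Phase) -> list[str]:
    # The call site guarantees a PluginPhase, but mypy only sees Phase,
    # hence the narrowly scoped suppression.
    return phase.essential_requires()  # type: ignore[attr-defined]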