Enable flynt and refurb ruff rules
Replacing regex flag aliases with their full names to pass FURB167.
Removing one .readlines() usage for efficiency (FURB129).
Flynt f-string checks pass, ignoring the tests/unit dir.
martinhoyer committed Aug 9, 2024
1 parent 00701f2 commit 7aae691
Showing 6 changed files with 26 additions and 23 deletions.
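
For context, a minimal sketch of the patterns the two refurb rules named in the commit message flag. This is illustrative only, not code taken from this commit; the text and file contents are made up:

import io
import re

text = "Name: /examples/basic\nBug: 1234\n"

# FURB167: spell regex flags out in full instead of using the one-letter aliases.
re.search(r"^Name:[ \t]*(.*)$", text, re.M)          # flagged by FURB167
re.search(r"^Name:[ \t]*(.*)$", text, re.MULTILINE)  # preferred spelling

# FURB129: iterate over a file object directly; .readlines() builds the whole
# list in memory only to be discarded after the loop.
fake_file = io.StringIO("first line\nsecond line\n")
stripped = [line.strip() for line in fake_file]      # no .readlines() needed
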
3 changes: 3 additions & 0 deletions pyproject.toml
@@ -318,6 +318,8 @@ lint.select = [
"PLC", # pylint-convention
"PLE", # pylint-error
"PLR", # pylint-refactor
"FLY", # flynt
"FURB", # refurb
"RUF", # ruff
"D", # pydocstyle
]
@@ -373,6 +375,7 @@ lint.typing-modules = ["tmt._compat.typing"]
"S605", # Starting a process with a shell: seems safe, but may be changed in the future
"S318", # Using xml to parse untrusted data is known to be vulnerable to XML attacks
"S108", # Probable insecure usage of temporary file or directory: "{}"
"FLY002", # Use f-string instead of .join
]
# The purpose of tmt/_compat is to be used with TID251 (banned imports)
"tmt/_compat/**.py" = ["TID251"]
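
FLY002 is the check ignored above for the per-file section (per the commit message, the tests/unit directory) while the FLY group stays enabled elsewhere. A rough illustration of what it suggests, again not code from this repository and with made-up values:

name, version = "tmt", "1.36"

# Flagged by FLY002: joining a fixed, known-length sequence of values.
title = " ".join([name, version])

# Suggested rewrite: an f-string expresses the same thing directly.
title = f"{name} {version}"
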
36 changes: 18 additions & 18 deletions tmt/convert.py
@@ -228,7 +228,7 @@ def read_datafile(
testinfo = datafile

# Beaker task name
- search_result = re.search(regex_task, testinfo, re.M)
+ search_result = re.search(regex_task, testinfo, re.MULTILINE)
if search_result is None:
raise ConvertError("Unable to parse 'Name' from testinfo.desc.")
beaker_task = search_result.group(1).strip()
@@ -237,13 +237,13 @@ def read_datafile(
data['extra-summary'] = beaker_task

# Summary
- search_result = re.search(regex_summary, testinfo, re.M)
+ search_result = re.search(regex_summary, testinfo, re.MULTILINE)
if search_result is not None:
data['summary'] = search_result.group(1).strip()
echo(style('summary: ', fg='green') + data['summary'])

# Test script
- search_result = re.search(regex_test, datafile_test, re.M)
+ search_result = re.search(regex_test, datafile_test, re.MULTILINE)
if search_result is None:
if filename == 'metadata':
# entry_point property is optional. When absent 'make run' is used.
@@ -271,7 +271,7 @@ def read_datafile(
with open(makefile_path, encoding='utf-8') as makefile_file:
makefile = makefile_file.read()
search_result = \
- re.search(makefile_regex_test, makefile, re.M)
+ re.search(makefile_regex_test, makefile, re.MULTILINE)
except OSError:
raise ConvertError("Makefile is missing.")
# Retrieve the path to the test file from the Makefile
@@ -280,7 +280,7 @@ def read_datafile(
# Read the test file and determine the framework used.
if test_path:
with open(test_path, encoding="utf-8") as test_file:
if re.search("beakerlib", test_file.read(), re.M):
if re.search("beakerlib", test_file.read(), re.MULTILINE):
data["framework"] = "beakerlib"
else:
data["framework"] = "shell"
@@ -291,28 +291,28 @@ def read_datafile(
raise ConvertError(f"Unable to open '{test_path}'.")

# Contact
- search_result = re.search(regex_contact, testinfo, re.M)
+ search_result = re.search(regex_contact, testinfo, re.MULTILINE)
if search_result is not None:
data['contact'] = search_result.group(1).strip()
echo(style('contact: ', fg='green') + data['contact'])

if filename == 'Makefile':
# Component
- search_result = re.search(r'^RunFor:[ \t]*(.*)$', testinfo, re.M)
+ search_result = re.search(r'^RunFor:[ \t]*(.*)$', testinfo, re.MULTILINE)
if search_result is not None:
data['component'] = search_result.group(1).split()
echo(style('component: ', fg='green') +
' '.join(data['component']))

# Duration
- search_result = re.search(regex_duration, testinfo, re.M)
+ search_result = re.search(regex_duration, testinfo, re.MULTILINE)
if search_result is not None:
data['duration'] = search_result.group(1).strip()
echo(style('duration: ', fg='green') + data['duration'])

if filename == 'Makefile':
# Environment
- variables = re.findall(r'^Environment:[ \t]*(.*)$', testinfo, re.M)
+ variables = re.findall(r'^Environment:[ \t]*(.*)$', testinfo, re.MULTILINE)
if variables:
data['environment'] = {}
for variable in variables:
@@ -334,15 +334,15 @@ def sanitize_name(name: str) -> str:
return name

# RhtsRequires or repoRequires (optional) goes to require
- requires = re.findall(regex_require, testinfo, re.M)
+ requires = re.findall(regex_require, testinfo, re.MULTILINE)
if requires:
data['require'] = [
sanitize_name(require.strip()) for line in requires
for require in line.split(rec_separator)]
echo(style('require: ', fg='green') + ' '.join(data['require']))

# Requires or softDependencies (optional) goes to recommend
- recommends = re.findall(regex_recommend, testinfo, re.M)
+ recommends = re.findall(regex_recommend, testinfo, re.MULTILINE)
if recommends:
data['recommend'] = [
sanitize_name(recommend.strip()) for line in recommends
@@ -352,7 +352,7 @@ def sanitize_name(name: str) -> str:

if filename == 'Makefile':
# Convert Type into tags
- search_result = re.search(r'^Type:[ \t]*(.*)$', testinfo, re.M)
+ search_result = re.search(r'^Type:[ \t]*(.*)$', testinfo, re.MULTILINE)
if search_result is not None:
makefile_type = search_result.group(1).strip()
if 'all' in [type_.lower() for type_ in types]:
@@ -364,7 +364,7 @@ def sanitize_name(name: str) -> str:
echo(style("tag: ", fg="green") + " ".join(tags))
data["tag"] = tags
# Add relevant bugs to the 'link' attribute
- for bug_line in re.findall(r'^Bug:\s*([0-9\s]+)', testinfo, re.M):
+ for bug_line in re.findall(r'^Bug:\s*([0-9\s]+)', testinfo, re.MULTILINE):
for bug in re.findall(r'(\d+)', bug_line):
add_link(bug, data, SYSTEM_BUGZILLA)

@@ -519,7 +519,7 @@ def target_content_run() -> list[str]:
if '\\\n' in datafile:
datafile_test = re.sub(r'\\\n', newline_stub, datafile)
regexp = r'^run:.*\n\t(.*)$'
- search_result = re.search(regexp, datafile_test, re.M)
+ search_result = re.search(regexp, datafile_test, re.MULTILINE)
if search_result is None:
# Target not found in the Makefile
return []
@@ -533,7 +533,7 @@ def target_content_build() -> list[str]:
def target_content_build() -> list[str]:
""" Extract lines from the build content """
regexp = r'^build:.*\n((?:\t[^\n]*\n?)*)'
- search_result = re.search(regexp, datafile, re.M)
+ search_result = re.search(regexp, datafile, re.MULTILINE)
if search_result is None:
# Target not found in the Makefile
return []
@@ -760,7 +760,7 @@ def read_tier(tag: str, data: NitrateDataType) -> None:
Check for the tier attribute, if there are multiple TierX tags, pick
the one with the lowest index.
"""
- tier_match = re.match(r'^Tier ?(?P<num>\d+)$', tag, re.I)
+ tier_match = re.match(r'^Tier ?(?P<num>\d+)$', tag, re.IGNORECASE)
if tier_match:
num = tier_match.group('num')
if 'tier' in data:
@@ -984,12 +984,12 @@ def extract_relevancy(
return None
# Fallback to the original relevancy syntax
# The relevancy definition begins with the header
- matched = re.search(RELEVANCY_LEGACY_HEADER, notes, re.I + re.M + re.S)
+ matched = re.search(RELEVANCY_LEGACY_HEADER, notes, re.IGNORECASE + re.MULTILINE + re.DOTALL)
if not matched:
return None
relevancy = matched.groups()[0]
# Remove possible additional text after an empty line
matched = re.search(r"(.*?)\n\s*\n.*", relevancy, re.S)
matched = re.search(r"(.*?)\n\s*\n.*", relevancy, re.DOTALL)
if matched:
relevancy = matched.groups()[0]
return relevancy.strip()
2 changes: 1 addition & 1 deletion tmt/export/__init__.py
@@ -379,7 +379,7 @@ def check_md_file_respects_spec(md_path: Path) -> list[str]:
html_headings_from_file = [i[0] for i in
re.findall('(^<h[1-4]>(.+?)</h[1-4]>$)',
md_to_html,
- re.M)]
+ re.MULTILINE)]

# No invalid headings in the file w/o headings
if not html_headings_from_file:
4 changes: 2 additions & 2 deletions tmt/export/nitrate.py
@@ -263,7 +263,7 @@ def return_markdown_file() -> Optional[Path]:
""" Return path to the markdown file """
files = '\n'.join(os.listdir())
reg_exp = r'.+\.md$'
- md_files = re.findall(reg_exp, files, re.M)
+ md_files = re.findall(reg_exp, files, re.MULTILINE)
fail_message = ("in the current working directory.\n"
"Manual steps couldn't be exported")
if len(md_files) == 1:
@@ -285,7 +285,7 @@ def get_category(path: Path) -> str:
with open(path / 'Makefile', encoding='utf-8') as makefile_file:
makefile = makefile_file.read()
category_search = re.search(
- r'echo\s+"Type:\s*(.*)"', makefile, re.M)
+ r'echo\s+"Type:\s*(.*)"', makefile, re.MULTILINE)
if category_search:
category = category_search.group(1)
# Default to 'Sanity' if Makefile or Type not found
2 changes: 1 addition & 1 deletion tmt/steps/provision/__init__.py
@@ -946,7 +946,7 @@ def _ansible_summary(self, output: Optional[str]) -> None:
return
keys = 'ok changed unreachable failed skipped rescued ignored'.split()
for key in keys:
- matched = re.search(rf'^.*\s:\s.*{key}=(\d+).*$', output, re.M)
+ matched = re.search(rf'^.*\s:\s.*{key}=(\d+).*$', output, re.MULTILINE)
if matched and int(matched.group(1)) > 0:
tasks = fmf.utils.listed(matched.group(1), 'task')
self.verbose(key, tasks, 'green')
2 changes: 1 addition & 1 deletion tmt/utils/__init__.py
@@ -5193,7 +5193,7 @@ def url_and_name(self, cwd: Optional[Path] = None) -> list[tuple[str, str]]:
ret_values = []
try:
with open(cwd / self.sources_file_name) as f:
- for line in f.readlines():
+ for line in f:
match = self.re_source.match(line)
if match is None:
raise GeneralError(
