From e63d975c9606e409bf4c74fa1d4a90bfbf83a943 Mon Sep 17 00:00:00 2001 From: spadakan Date: Mon, 18 Dec 2023 07:00:56 -0500 Subject: [PATCH 01/19] added opensearch keys to a secret --- opl/http.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/opl/http.py b/opl/http.py index 3e20a32..9491853 100644 --- a/opl/http.py +++ b/opl/http.py @@ -1,12 +1,23 @@ import logging import requests - +from requests.auth import HTTPBasicAuth import urllib3 - - -session = requests.Session() - +import os + +enable_new_search = os.getenv('NEW_SEARCH_ENABLED', 'false').lower() == 'true' + +if enable_new_search: + username = os.getenv('OPEN_SEARCH_DASBOARD') + password = os.getenv('OPEN_SEARCH_PASSWORD') + session = requests.Session() + session.auth = HTTPBasicAuth(username, password) + session.verify = False + session.headers.update({ + "Content-Type": "application/json", + }) +else: + session = requests.Session() def disable_insecure_request_warnings(disable_it): if disable_it: From cf5069d2b6422e62a3ced56c1aecd2637231d3c7 Mon Sep 17 00:00:00 2001 From: spadakan Date: Mon, 18 Dec 2023 07:06:44 -0500 Subject: [PATCH 02/19] fixing lint errors --- opl/http.py | 1 + 1 file changed, 1 insertion(+) diff --git a/opl/http.py b/opl/http.py index 9491853..4e18d19 100644 --- a/opl/http.py +++ b/opl/http.py @@ -19,6 +19,7 @@ else: session = requests.Session() + def disable_insecure_request_warnings(disable_it): if disable_it: logging.debug("Disabling insecure request warnings") From 2739348771ca3c3d8bf900e53eef25198f681594 Mon Sep 17 00:00:00 2001 From: spadakan Date: Mon, 18 Dec 2023 07:27:10 -0500 Subject: [PATCH 03/19] fixing black formatter issues --- opl/http.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/opl/http.py b/opl/http.py index 4e18d19..750829d 100644 --- a/opl/http.py +++ b/opl/http.py @@ -2,20 +2,23 @@ import requests from requests.auth import HTTPBasicAuth + import urllib3 import os -enable_new_search = os.getenv('NEW_SEARCH_ENABLED', 'false').lower() == 'true' +enable_new_search = os.getenv("NEW_SEARCH_ENABLED", "false").lower() == "true" if enable_new_search: - username = os.getenv('OPEN_SEARCH_DASBOARD') - password = os.getenv('OPEN_SEARCH_PASSWORD') + username = os.getenv("OPEN_SEARCH_USERNAME") + password = os.getenv("OPEN_SEARCH_PASSWORD") session = requests.Session() session.auth = HTTPBasicAuth(username, password) session.verify = False - session.headers.update({ - "Content-Type": "application/json", - }) + session.headers.update( + { + "Content-Type": "application/json", + } + ) else: session = requests.Session() From f6d34e4cfde21fcca6ba43ffe9eefe3c3e1835e5 Mon Sep 17 00:00:00 2001 From: Sarath Padakandla Date: Wed, 10 Jan 2024 17:53:16 -0500 Subject: [PATCH 04/19] added new elastic search authentication --- opl/investigator/elasticsearch_loader.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/opl/investigator/elasticsearch_loader.py b/opl/investigator/elasticsearch_loader.py index 5c76c17..20561ff 100644 --- a/opl/investigator/elasticsearch_loader.py +++ b/opl/investigator/elasticsearch_loader.py @@ -1,10 +1,15 @@ import json import logging import tempfile +from requests.auth import HTTPBasicAuth import opl.http import opl.status_data +# Environment variables for OpenSearch credentials +open_search_username = "insights_perf" +open_search_password = os.environ.get('OPEN_SEARCH_PASSWORD') + def load(server, index, query, paths): out = {} @@ -21,7 +26,7 @@ def load(server, index, query, 
paths): f"Querying ES with url={url}, headers={headers} and json={json.dumps(data)}" ) - response = opl.http.get(url, headers=headers, json=data) + response = opl.http.get(url, auth=HTTPBasicAuth(open_search_username, open_search_password), headers=headers, json=data) for item in response["hits"]["hits"]: logging.debug( From f0fa9348c78c2d045b851ce410ee2e09c7054d51 Mon Sep 17 00:00:00 2001 From: Sarath Padakandla Date: Wed, 10 Jan 2024 17:54:54 -0500 Subject: [PATCH 05/19] Update http.py added verification false for https --- opl/http.py | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/opl/http.py b/opl/http.py index 750829d..c90b0c5 100644 --- a/opl/http.py +++ b/opl/http.py @@ -1,28 +1,13 @@ import logging import requests -from requests.auth import HTTPBasicAuth import urllib3 -import os - -enable_new_search = os.getenv("NEW_SEARCH_ENABLED", "false").lower() == "true" - -if enable_new_search: - username = os.getenv("OPEN_SEARCH_USERNAME") - password = os.getenv("OPEN_SEARCH_PASSWORD") - session = requests.Session() - session.auth = HTTPBasicAuth(username, password) - session.verify = False - session.headers.update( - { - "Content-Type": "application/json", - } - ) -else: - session = requests.Session() +session = requests.Session() +session.verify=False + def disable_insecure_request_warnings(disable_it): if disable_it: logging.debug("Disabling insecure request warnings") From b5dea9ddeebe206a49270b889fe943d8e4759e2c Mon Sep 17 00:00:00 2001 From: spadakan Date: Thu, 11 Jan 2024 09:07:50 -0500 Subject: [PATCH 06/19] fixing elastic search update using investigator conf --- opl/http.py | 3 +- opl/investigator/config.py | 8 ++++ opl/investigator/elasticsearch_decisions.py | 16 +++++++- opl/investigator/elasticsearch_loader.py | 43 +++++++++++++-------- opl/pass_or_fail.py | 17 +++++++- 5 files changed, 67 insertions(+), 20 deletions(-) diff --git a/opl/http.py b/opl/http.py index c90b0c5..4ce0d16 100644 --- a/opl/http.py +++ b/opl/http.py @@ -6,12 +6,13 @@ session = requests.Session() -session.verify=False + def disable_insecure_request_warnings(disable_it): if disable_it: logging.debug("Disabling insecure request warnings") urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + session.verify = False def req(method, url, **kwargs): diff --git a/opl/investigator/config.py b/opl/investigator/config.py index b163441..9731a85 100644 --- a/opl/investigator/config.py +++ b/opl/investigator/config.py @@ -75,6 +75,10 @@ def load_config(conf, fp): assert not conf.history_es_server.endswith("/") conf.history_es_index = data["history"]["es_index"] conf.history_es_query = data["history"]["es_query"] + if "es_server_user" in data["history"]: + conf.es_server_user = data["history"]["es_server_user"] + conf.es_server_pass = data["history"]["es_server_pass"] + conf.es_server_verify = data["history"]["es_server_verify"] if conf.history_type == "sd_dir": conf.history_dir = data["history"]["dir"] @@ -88,6 +92,10 @@ def load_config(conf, fp): conf.decisions_es_server = data["decisions"]["es_server"] assert not conf.decisions_es_server.endswith("/") conf.decisions_es_index = data["decisions"]["es_index"] + if "es_server_user" in data["decisions"]: + conf.decisions_es_server_user = data["decisions"]["es_server_user"] + conf.decisions_es_server_pass = data["decisions"]["es_server_pass"] + conf.decisions_es_server_verify = data["decisions"]["es_server_verify"] if conf.decisions_type == "csv": conf.decisions_filename = data["decisions"]["filename"] diff --git 
a/opl/investigator/elasticsearch_decisions.py b/opl/investigator/elasticsearch_decisions.py index b4bba77..b090413 100644 --- a/opl/investigator/elasticsearch_decisions.py +++ b/opl/investigator/elasticsearch_decisions.py @@ -6,7 +6,9 @@ import requests -def store(server, index, decisions): +def store(server, index, decisions, **kwargs): + es_server_user = kwargs.get("es_server_user") + es_server_pass = kwargs.get("es_server_pass") # This is our workaround on how to add additional metadata about the decision job_name = os.environ.get("JOB_NAME", "") build_url = os.environ.get("BUILD_URL", "") @@ -26,7 +28,17 @@ def store(server, index, decisions): f"Storing decision to ES url={url}, headers={headers} and json={json.dumps(decision)}" ) - response = requests.post(url, headers=headers, json=decision) + if es_server_user and es_server_pass: + # fetch the password from Jenkins credentials + open_search_password = os.environ.get(es_server_pass) + response = requests.post( + url, + auth=requests.auth.HTTPBasicAuth(es_server_user, open_search_password), + headers=headers, + json=decision, + ) + else: + response = requests.post(url, headers=headers, json=decision) if not response.ok: logging.warning(f"Failed to store decision to ES: {response.text}") diff --git a/opl/investigator/elasticsearch_loader.py b/opl/investigator/elasticsearch_loader.py index 20561ff..7ca600d 100644 --- a/opl/investigator/elasticsearch_loader.py +++ b/opl/investigator/elasticsearch_loader.py @@ -1,17 +1,17 @@ import json import logging import tempfile -from requests.auth import HTTPBasicAuth +import os import opl.http import opl.status_data +from requests.auth import HTTPBasicAuth -# Environment variables for OpenSearch credentials -open_search_username = "insights_perf" -open_search_password = os.environ.get('OPEN_SEARCH_PASSWORD') +def load(server, index, query, paths, **kwargs): + es_server_user = kwargs.get("es_server_user") + es_server_pass = kwargs.get("es_server_pass") -def load(server, index, query, paths): out = {} for path in paths: @@ -26,18 +26,29 @@ def load(server, index, query, paths): f"Querying ES with url={url}, headers={headers} and json={json.dumps(data)}" ) - response = opl.http.get(url, auth=HTTPBasicAuth(open_search_username, open_search_password), headers=headers, json=data) - - for item in response["hits"]["hits"]: - logging.debug( - f"Loading data from document ID {item['_id']} with field id={item['_source']['id'] if 'id' in item['_source'] else None} or parameters.run={item['_source']['parameters']['run'] if 'run' in item['_source']['parameters'] else None}" + if es_server_user and es_server_pass: + # fetch the password from Jenkins credentials + open_search_password = os.environ.get(es_server_pass) + response = opl.http.get( + url, + auth=HTTPBasicAuth(es_server_user, open_search_password), + headers=headers, + json=data, ) - tmpfile = tempfile.NamedTemporaryFile(prefix=item["_id"], delete=False).name - sd = opl.status_data.StatusData(tmpfile, data=item["_source"]) - for path in paths: - tmp = sd.get(path) - if tmp is not None: - out[path].append(tmp) + else: + response = opl.http.get(url, headers=headers, json=data) + + if response: + for item in response["hits"]["hits"]: + logging.debug( + f"Loading data from document ID {item['_id']} with field id={item['_source']['id'] if 'id' in item['_source'] else None} or parameters.run={item['_source']['parameters']['run'] if 'run' in item['_source']['parameters'] else None}" + ) + tmpfile = tempfile.NamedTemporaryFile(prefix=item["_id"], 
delete=False).name + sd = opl.status_data.StatusData(tmpfile, data=item["_source"]) + for path in paths: + tmp = sd.get(path) + if tmp is not None: + out[path].append(tmp) logging.debug(f"Loaded {out}") return out diff --git a/opl/pass_or_fail.py b/opl/pass_or_fail.py index 0be5e71..4bb3377 100755 --- a/opl/pass_or_fail.py +++ b/opl/pass_or_fail.py @@ -114,12 +114,18 @@ def main(): if args.history_type == "csv": history = opl.investigator.csv_loader.load(args.history_file, args.sets) elif args.history_type == "elasticsearch": + if hasattr(args, "es_server_user"): + # if new elasticsearch credentials are provided, use them + opl.http.disable_insecure_request_warnings(args.es_server_verify) history = opl.investigator.elasticsearch_loader.load( args.history_es_server, args.history_es_index, args.history_es_query, args.sets, + es_server_user=getattr(args, "es_server_user", None), + es_server_pass=getattr(args, "es_server_pass", None), ) + elif args.history_type == "sd_dir": history = opl.investigator.sd_dir_loader.load( args.history_dir, args.history_matchers, args.sets @@ -200,8 +206,17 @@ def main(): if not args.dry_run: for d_type in args.decisions_type: if d_type == "elasticsearch": + if hasattr(args, "es_server_user"): + # if new elasticsearch credentials are provided, use them + opl.http.disable_insecure_request_warnings( + args.decisions_es_server_verify + ) opl.investigator.elasticsearch_decisions.store( - args.decisions_es_server, args.decisions_es_index, info_all + args.decisions_es_server, + args.decisions_es_index, + info_all, + es_server_user=getattr(args, "decisions_es_server_user", None), + es_server_pass=getattr(args, "decisions_es_server_pass", None), ) if d_type == "csv": opl.investigator.csv_decisions.store(args.decisions_filename, info_all) From c24965d74b53c6bb7524588e3949952899222ae5 Mon Sep 17 00:00:00 2001 From: spadakan Date: Fri, 12 Jan 2024 08:04:11 -0500 Subject: [PATCH 07/19] updated after JHutar comments --- opl/http.py | 7 ++++++- opl/investigator/config.py | 6 ++++-- opl/investigator/elasticsearch_loader.py | 21 ++++++++++----------- opl/pass_or_fail.py | 18 ++++++++---------- 4 files changed, 28 insertions(+), 24 deletions(-) diff --git a/opl/http.py b/opl/http.py index 4ce0d16..2069c28 100644 --- a/opl/http.py +++ b/opl/http.py @@ -8,11 +8,16 @@ session = requests.Session() +def insecure(): + session.verify = False + logging.debug("Disabling insecure request warnings") + disable_insecure_request_warnings(True) + + def disable_insecure_request_warnings(disable_it): if disable_it: logging.debug("Disabling insecure request warnings") urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) - session.verify = False def req(method, url, **kwargs): diff --git a/opl/investigator/config.py b/opl/investigator/config.py index 9731a85..08a9d0f 100644 --- a/opl/investigator/config.py +++ b/opl/investigator/config.py @@ -77,7 +77,7 @@ def load_config(conf, fp): conf.history_es_query = data["history"]["es_query"] if "es_server_user" in data["history"]: conf.es_server_user = data["history"]["es_server_user"] - conf.es_server_pass = data["history"]["es_server_pass"] + conf.es_server_pass_env_var = data["history"]["es_server_pass_env_var"] conf.es_server_verify = data["history"]["es_server_verify"] if conf.history_type == "sd_dir": @@ -94,7 +94,9 @@ def load_config(conf, fp): conf.decisions_es_index = data["decisions"]["es_index"] if "es_server_user" in data["decisions"]: conf.decisions_es_server_user = data["decisions"]["es_server_user"] - 
conf.decisions_es_server_pass = data["decisions"]["es_server_pass"] + conf.decisions_es_server_pass_env_var = data["decisions"][ + "decisions_es_server_pass_env_var" + ] conf.decisions_es_server_verify = data["decisions"]["es_server_verify"] if conf.decisions_type == "csv": conf.decisions_filename = data["decisions"]["filename"] diff --git a/opl/investigator/elasticsearch_loader.py b/opl/investigator/elasticsearch_loader.py index 7ca600d..8b67797 100644 --- a/opl/investigator/elasticsearch_loader.py +++ b/opl/investigator/elasticsearch_loader.py @@ -38,17 +38,16 @@ def load(server, index, query, paths, **kwargs): else: response = opl.http.get(url, headers=headers, json=data) - if response: - for item in response["hits"]["hits"]: - logging.debug( - f"Loading data from document ID {item['_id']} with field id={item['_source']['id'] if 'id' in item['_source'] else None} or parameters.run={item['_source']['parameters']['run'] if 'run' in item['_source']['parameters'] else None}" - ) - tmpfile = tempfile.NamedTemporaryFile(prefix=item["_id"], delete=False).name - sd = opl.status_data.StatusData(tmpfile, data=item["_source"]) - for path in paths: - tmp = sd.get(path) - if tmp is not None: - out[path].append(tmp) + for item in response["hits"]["hits"]: + logging.debug( + f"Loading data from document ID {item['_id']} with field id={item['_source']['id'] if 'id' in item['_source'] else None} or parameters.run={item['_source']['parameters']['run'] if 'run' in item['_source']['parameters'] else None}" + ) + tmpfile = tempfile.NamedTemporaryFile(prefix=item["_id"], delete=False).name + sd = opl.status_data.StatusData(tmpfile, data=item["_source"]) + for path in paths: + tmp = sd.get(path) + if tmp is not None: + out[path].append(tmp) logging.debug(f"Loaded {out}") return out diff --git a/opl/pass_or_fail.py b/opl/pass_or_fail.py index 4bb3377..c23ed02 100755 --- a/opl/pass_or_fail.py +++ b/opl/pass_or_fail.py @@ -114,16 +114,16 @@ def main(): if args.history_type == "csv": history = opl.investigator.csv_loader.load(args.history_file, args.sets) elif args.history_type == "elasticsearch": - if hasattr(args, "es_server_user"): - # if new elasticsearch credentials are provided, use them - opl.http.disable_insecure_request_warnings(args.es_server_verify) + if hasattr(args, "es_server_verify"): + # SSL verification is disabled by default + opl.http.insecure() history = opl.investigator.elasticsearch_loader.load( args.history_es_server, args.history_es_index, args.history_es_query, args.sets, es_server_user=getattr(args, "es_server_user", None), - es_server_pass=getattr(args, "es_server_pass", None), + es_server_pass=getattr(args, "es_server_pass_env_var", None), ) elif args.history_type == "sd_dir": @@ -206,17 +206,15 @@ def main(): if not args.dry_run: for d_type in args.decisions_type: if d_type == "elasticsearch": - if hasattr(args, "es_server_user"): - # if new elasticsearch credentials are provided, use them - opl.http.disable_insecure_request_warnings( - args.decisions_es_server_verify - ) + if hasattr(args, "es_server_verify"): + # disable SSL verification + opl.http.insecure() opl.investigator.elasticsearch_decisions.store( args.decisions_es_server, args.decisions_es_index, info_all, es_server_user=getattr(args, "decisions_es_server_user", None), - es_server_pass=getattr(args, "decisions_es_server_pass", None), + es_server_pass=getattr(args, "decisions_es_server_pass_env_var", None), ) if d_type == "csv": opl.investigator.csv_decisions.store(args.decisions_filename, info_all) From 
bc212ec97d91a6815e92ce68b4e1208e0b3369ea Mon Sep 17 00:00:00 2001 From: spadakan Date: Fri, 12 Jan 2024 10:21:56 -0500 Subject: [PATCH 08/19] Part3 comments updated --- opl/http.py | 2 +- opl/investigator/elasticsearch_decisions.py | 6 +++--- opl/investigator/elasticsearch_loader.py | 6 +++--- opl/pass_or_fail.py | 6 ++++-- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/opl/http.py b/opl/http.py index 2069c28..3e8ce3c 100644 --- a/opl/http.py +++ b/opl/http.py @@ -10,7 +10,7 @@ def insecure(): session.verify = False - logging.debug("Disabling insecure request warnings") + logging.debug("Disabling SSL verifications for this session") disable_insecure_request_warnings(True) diff --git a/opl/investigator/elasticsearch_decisions.py b/opl/investigator/elasticsearch_decisions.py index b090413..1996e00 100644 --- a/opl/investigator/elasticsearch_decisions.py +++ b/opl/investigator/elasticsearch_decisions.py @@ -8,7 +8,7 @@ def store(server, index, decisions, **kwargs): es_server_user = kwargs.get("es_server_user") - es_server_pass = kwargs.get("es_server_pass") + decisions_es_server_pass_env_var = kwargs.get("es_server_pass") # This is our workaround on how to add additional metadata about the decision job_name = os.environ.get("JOB_NAME", "") build_url = os.environ.get("BUILD_URL", "") @@ -28,9 +28,9 @@ def store(server, index, decisions, **kwargs): f"Storing decision to ES url={url}, headers={headers} and json={json.dumps(decision)}" ) - if es_server_user and es_server_pass: + if es_server_user and decisions_es_server_pass_env_var: # fetch the password from Jenkins credentials - open_search_password = os.environ.get(es_server_pass) + open_search_password = os.environ.get(decisions_es_server_pass_env_var) response = requests.post( url, auth=requests.auth.HTTPBasicAuth(es_server_user, open_search_password), diff --git a/opl/investigator/elasticsearch_loader.py b/opl/investigator/elasticsearch_loader.py index 8b67797..a38ad19 100644 --- a/opl/investigator/elasticsearch_loader.py +++ b/opl/investigator/elasticsearch_loader.py @@ -10,7 +10,7 @@ def load(server, index, query, paths, **kwargs): es_server_user = kwargs.get("es_server_user") - es_server_pass = kwargs.get("es_server_pass") + es_server_pass_env_var = kwargs.get("es_server_pass") out = {} @@ -26,9 +26,9 @@ def load(server, index, query, paths, **kwargs): f"Querying ES with url={url}, headers={headers} and json={json.dumps(data)}" ) - if es_server_user and es_server_pass: + if es_server_user and es_server_pass_env_var: # fetch the password from Jenkins credentials - open_search_password = os.environ.get(es_server_pass) + open_search_password = os.environ.get(es_server_pass_env_var) response = opl.http.get( url, auth=HTTPBasicAuth(es_server_user, open_search_password), diff --git a/opl/pass_or_fail.py b/opl/pass_or_fail.py index c23ed02..410d72f 100755 --- a/opl/pass_or_fail.py +++ b/opl/pass_or_fail.py @@ -123,7 +123,7 @@ def main(): args.history_es_query, args.sets, es_server_user=getattr(args, "es_server_user", None), - es_server_pass=getattr(args, "es_server_pass_env_var", None), + es_server_pass_env_var=getattr(args, "es_server_pass_env_var", None), ) elif args.history_type == "sd_dir": @@ -214,7 +214,9 @@ def main(): args.decisions_es_index, info_all, es_server_user=getattr(args, "decisions_es_server_user", None), - es_server_pass=getattr(args, "decisions_es_server_pass_env_var", None), + decisions_es_server_pass_env_var=getattr( + args, "decisions_es_server_pass_env_var", None + ), ) if d_type == "csv": 
opl.investigator.csv_decisions.store(args.decisions_filename, info_all) From e0f20be7a17d681a2cdeb0978f41a353a0f73d1f Mon Sep 17 00:00:00 2001 From: spadakan Date: Mon, 15 Jan 2024 07:42:37 -0500 Subject: [PATCH 09/19] updated files with more comments --- opl/investigator/config.py | 6 +++--- opl/investigator/elasticsearch_decisions.py | 4 ++-- opl/pass_or_fail.py | 12 ++++++------ 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/opl/investigator/config.py b/opl/investigator/config.py index 08a9d0f..4f94cc6 100644 --- a/opl/investigator/config.py +++ b/opl/investigator/config.py @@ -76,9 +76,9 @@ def load_config(conf, fp): conf.history_es_index = data["history"]["es_index"] conf.history_es_query = data["history"]["es_query"] if "es_server_user" in data["history"]: - conf.es_server_user = data["history"]["es_server_user"] - conf.es_server_pass_env_var = data["history"]["es_server_pass_env_var"] - conf.es_server_verify = data["history"]["es_server_verify"] + conf.history_es_server_user = data["history"]["es_server_user"] + conf.history_es_server_pass_env_var = data["history"]["es_server_pass_env_var"] + conf.history_es_server_verify = data["history"]["es_server_verify"] if conf.history_type == "sd_dir": conf.history_dir = data["history"]["dir"] diff --git a/opl/investigator/elasticsearch_decisions.py b/opl/investigator/elasticsearch_decisions.py index 1996e00..1b50b93 100644 --- a/opl/investigator/elasticsearch_decisions.py +++ b/opl/investigator/elasticsearch_decisions.py @@ -7,8 +7,8 @@ def store(server, index, decisions, **kwargs): - es_server_user = kwargs.get("es_server_user") - decisions_es_server_pass_env_var = kwargs.get("es_server_pass") + es_server_user = kwargs.get("decisions_es_server_user") + decisions_es_server_pass_env_var = kwargs.get("es_server_pass_env_var") # This is our workaround on how to add additional metadata about the decision job_name = os.environ.get("JOB_NAME", "") build_url = os.environ.get("BUILD_URL", "") diff --git a/opl/pass_or_fail.py b/opl/pass_or_fail.py index 410d72f..9fa4880 100755 --- a/opl/pass_or_fail.py +++ b/opl/pass_or_fail.py @@ -114,7 +114,7 @@ def main(): if args.history_type == "csv": history = opl.investigator.csv_loader.load(args.history_file, args.sets) elif args.history_type == "elasticsearch": - if hasattr(args, "es_server_verify"): + if hasattr(args, "history_es_server_verify") and not args.history_es_server_verify: # SSL verification is disabled by default opl.http.insecure() history = opl.investigator.elasticsearch_loader.load( @@ -122,8 +122,8 @@ def main(): args.history_es_index, args.history_es_query, args.sets, - es_server_user=getattr(args, "es_server_user", None), - es_server_pass_env_var=getattr(args, "es_server_pass_env_var", None), + es_server_user=getattr(args, "history_es_server_user", None), + es_server_pass_env_var=getattr(args, "history_es_server_pass_env_var", None), ) elif args.history_type == "sd_dir": @@ -206,15 +206,15 @@ def main(): if not args.dry_run: for d_type in args.decisions_type: if d_type == "elasticsearch": - if hasattr(args, "es_server_verify"): + if hasattr(args, "es_server_verify") and not args.es_server_verify: # disable SSL verification opl.http.insecure() opl.investigator.elasticsearch_decisions.store( args.decisions_es_server, args.decisions_es_index, info_all, - es_server_user=getattr(args, "decisions_es_server_user", None), - decisions_es_server_pass_env_var=getattr( + decisions_es_server_user=getattr(args, "decisions_es_server_user", None), + es_server_pass_env_var=getattr( args, 
"decisions_es_server_pass_env_var", None ), ) From 65dcb1e7b1baeb5f31df81c4dee9e790aaed8b71 Mon Sep 17 00:00:00 2001 From: spadakan Date: Mon, 15 Jan 2024 08:13:01 -0500 Subject: [PATCH 10/19] updated another round of comments --- opl/investigator/config.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/opl/investigator/config.py b/opl/investigator/config.py index 4f94cc6..4b1e9f2 100644 --- a/opl/investigator/config.py +++ b/opl/investigator/config.py @@ -75,10 +75,9 @@ def load_config(conf, fp): assert not conf.history_es_server.endswith("/") conf.history_es_index = data["history"]["es_index"] conf.history_es_query = data["history"]["es_query"] - if "es_server_user" in data["history"]: - conf.history_es_server_user = data["history"]["es_server_user"] - conf.history_es_server_pass_env_var = data["history"]["es_server_pass_env_var"] - conf.history_es_server_verify = data["history"]["es_server_verify"] + conf.history_es_server_user = data["history"]["es_server_user"] + conf.history_es_server_pass_env_var = data["history"]["es_server_pass_env_var"] + conf.history_es_server_verify = data["history"]["es_server_verify"] if conf.history_type == "sd_dir": conf.history_dir = data["history"]["dir"] @@ -92,12 +91,11 @@ def load_config(conf, fp): conf.decisions_es_server = data["decisions"]["es_server"] assert not conf.decisions_es_server.endswith("/") conf.decisions_es_index = data["decisions"]["es_index"] - if "es_server_user" in data["decisions"]: - conf.decisions_es_server_user = data["decisions"]["es_server_user"] - conf.decisions_es_server_pass_env_var = data["decisions"][ - "decisions_es_server_pass_env_var" - ] - conf.decisions_es_server_verify = data["decisions"]["es_server_verify"] + conf.decisions_es_server_user = data["decisions"]["es_server_user"] + conf.decisions_es_server_pass_env_var = data["decisions"][ + "decisions_es_server_pass_env_var" + ] + conf.decisions_es_server_verify = data["decisions"]["es_server_verify"] if conf.decisions_type == "csv": conf.decisions_filename = data["decisions"]["filename"] From 830882a3d37a1e6f79c95706b00d33513b8586a5 Mon Sep 17 00:00:00 2001 From: spadakan Date: Mon, 15 Jan 2024 11:18:12 -0500 Subject: [PATCH 11/19] updated files after comments --- opl/investigator/config.py | 23 ++++++++++++++------- opl/investigator/elasticsearch_decisions.py | 2 +- opl/investigator/elasticsearch_loader.py | 2 +- opl/pass_or_fail.py | 2 +- 4 files changed, 18 insertions(+), 11 deletions(-) diff --git a/opl/investigator/config.py b/opl/investigator/config.py index 4b1e9f2..7cd5cb2 100644 --- a/opl/investigator/config.py +++ b/opl/investigator/config.py @@ -75,9 +75,13 @@ def load_config(conf, fp): assert not conf.history_es_server.endswith("/") conf.history_es_index = data["history"]["es_index"] conf.history_es_query = data["history"]["es_query"] - conf.history_es_server_user = data["history"]["es_server_user"] - conf.history_es_server_pass_env_var = data["history"]["es_server_pass_env_var"] - conf.history_es_server_verify = data["history"]["es_server_verify"] + if "es_server_user" in data["history"]: + conf.history_es_server_user = data["history"]["es_server_user"] + conf.history_es_server_pass_env_var = data["history"]["es_server_pass_env_var"] + if "es_server_verify" in data["history"]: + conf.history_es_server_verify = data["history"]["es_server_verify"] + else: + conf.history_es_server_verify = True if conf.history_type == "sd_dir": conf.history_dir = data["history"]["dir"] @@ -91,11 +95,14 @@ def load_config(conf, fp): 
conf.decisions_es_server = data["decisions"]["es_server"] assert not conf.decisions_es_server.endswith("/") conf.decisions_es_index = data["decisions"]["es_index"] - conf.decisions_es_server_user = data["decisions"]["es_server_user"] - conf.decisions_es_server_pass_env_var = data["decisions"][ - "decisions_es_server_pass_env_var" - ] - conf.decisions_es_server_verify = data["decisions"]["es_server_verify"] + if "es_server_user" in data["decisions"]: + conf.decisions_es_server_user = data["decisions"]["es_server_user"] + conf.decisions_es_server_pass_env_var = data["decisions"]["es_server_pass_env_var"] + if "es_server_verify" in data["decisions"]: + conf.decisions_es_server_verify = data["decisions"]["es_server_verify"] + else: + conf.decisions_es_server_verify = True + if conf.decisions_type == "csv": conf.decisions_filename = data["decisions"]["filename"] diff --git a/opl/investigator/elasticsearch_decisions.py b/opl/investigator/elasticsearch_decisions.py index 1b50b93..a8d3e57 100644 --- a/opl/investigator/elasticsearch_decisions.py +++ b/opl/investigator/elasticsearch_decisions.py @@ -7,7 +7,7 @@ def store(server, index, decisions, **kwargs): - es_server_user = kwargs.get("decisions_es_server_user") + es_server_user = kwargs.get("es_server_user") decisions_es_server_pass_env_var = kwargs.get("es_server_pass_env_var") # This is our workaround on how to add additional metadata about the decision job_name = os.environ.get("JOB_NAME", "") diff --git a/opl/investigator/elasticsearch_loader.py b/opl/investigator/elasticsearch_loader.py index a38ad19..f2b6d60 100644 --- a/opl/investigator/elasticsearch_loader.py +++ b/opl/investigator/elasticsearch_loader.py @@ -10,7 +10,7 @@ def load(server, index, query, paths, **kwargs): es_server_user = kwargs.get("es_server_user") - es_server_pass_env_var = kwargs.get("es_server_pass") + es_server_pass_env_var = kwargs.get("es_server_pass_env_var") out = {} diff --git a/opl/pass_or_fail.py b/opl/pass_or_fail.py index 9fa4880..7b0c0c6 100755 --- a/opl/pass_or_fail.py +++ b/opl/pass_or_fail.py @@ -213,7 +213,7 @@ def main(): args.decisions_es_server, args.decisions_es_index, info_all, - decisions_es_server_user=getattr(args, "decisions_es_server_user", None), + es_server_user=getattr(args, "decisions_es_server_user", None), es_server_pass_env_var=getattr( args, "decisions_es_server_pass_env_var", None ), From 163206ad16cbb18ace15eaade152d7048ed4b55c Mon Sep 17 00:00:00 2001 From: shubham-html-css-js Date: Fri, 12 Jan 2024 11:27:35 +0530 Subject: [PATCH 12/19] Convert prow-to-es functions to python --- core/opl/shovel.py | 290 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 271 insertions(+), 19 deletions(-) diff --git a/core/opl/shovel.py b/core/opl/shovel.py index 405d709..b12c11d 100755 --- a/core/opl/shovel.py +++ b/core/opl/shovel.py @@ -1,19 +1,31 @@ import argparse import logging +import requests +import json +import subprocess +import os -from . import skelet +from . 
import skelet,status_data class pluginProw: def __init__(self, args): - print("Hello from pluginProw init") + print("Hello from pluginProw init 2") self.logger = logging.getLogger("opl.showel.pluginProw") + def list(self): - print("Hello from pluginProw list") + print("Hi from pluginProw list") - def download(self): - print("Hello from pluginProw download") + def download(self,args): + from_url = f'{args.prow_base_url}/{args.prow_job_name}/{args.prow_test_name}/{args.prow_artifact_path}' + to_path = f'{args.prow_data_file}' + if not os.path.isfile(to_path): + print(f"INFO: Downloading {from_url} ... ", end="") + subprocess.run(["curl", "-Ssl", "-o", to_path, from_url], check=True) + print("DONE") + else: + print(f"DEBUG: File {to_path} already present, skipping download") @staticmethod def args(parser, group_actions): @@ -58,16 +70,36 @@ def args(parser, group_actions): default="redhat-appstudio-load-test/artifacts/load-tests.json", help="Path to the artifact", ) - + group.add_argument( + "--prow-data-file", + help="prow data jdon file" + ) class pluginOpenSearch: def __init__(self, args): print("Hello from pluginOpenSearch init") self.logger = logging.getLogger("opl.showel.pluginOpenSearch") - def upload(self): + def upload(self,args): print("Hello from pluginOpenSearch upload") - + if args.data_file == None: + raise Exception("A Data file is needed to work with --opensearch-upload") + elif args.end_timestamp == None: + raise Exception("End timestamp is needed to work with --opensearch-upload") + else: + json_data=json.dumps({"query":{"match":{"endTimestamp":args.end_timestamp}}}) + headers={ + "Content-Type": "application/json" + } + jsonFile = open(args.data_file, 'r') + values = json.load(jsonFile) + current_doc_in_es=requests.get(f'{args.es_host_url}/{args.es_index}/_search',headers=headers,data=json_data) + if json.loads(current_doc_in_es.text)["hits"]["total"]["value"] == 0: + print("Uploading to ES...") + requests.post(f'{args.es_host_url}/{args.es_index}/_doc',headers=headers,data=json.dumps(values)) + else: + print("INFO: Already in ES, skipping upload") + @staticmethod def args(parser, group_actions): group_actions.add_argument( @@ -83,19 +115,143 @@ def args(parser, group_actions): title="opensearch", description="Options needed to work with OpenSearch", ) - + group.add_argument( + "--es-index", + default="rhtap-ci-status-data", + help="Elastic search index where the data will be stored" + ) + group.add_argument( + "--data-file", + help="json file to upload to elastic search" + ) + group.add_argument( + "--end-timestamp", + help="timestamp when the test ended" + ) class pluginHorreum: def __init__(self, args): print("Hello from pluginHorreum init") self.logger = logging.getLogger("opl.showel.pluginHorreum") - def upload(self): - print("Hello from pluginHorreum upload") + def upload(self,args): + if args.horreum_data_file == None: + raise Exception("Horreum data file is required to work with --horreum-upload") + elif args.horreum_host == None: + raise Exception("Horreum host is required to work with --horreum-upload") + elif args.token == None: + raise Exception("Authorisation Token is required to work with --horreum-upload") + elif args.job_name== None: + raise Exception("Job name is required to work with --horreum-upload") + else: + headers={ + "Content-Type": "application/json", + "Authorization": f'Bearer {args.token}' + } + jsonFile = open(args.horreum_data_file, 'r') + values = json.load(jsonFile) + jsonFile.close() + test_matcher=values[args.test_job_matcher] + 
test_start=values["timestamp"] + if test_start == "None": + test_start=values["startTimestamp"] + test_end = values["endTimestamp"] + if not test_start or not test_end or test_start == "None" or test_end == "None": + raise Exception("ERROR: We need start and end time in the JSON we are supposed to upload") + response = requests.get(f'{args.horreum_host}/api/test/byName/{args.test_name_horreum}',headers=headers) + test_id = json.load(response.text)["id"] + page=0 + while True: + data={"from":page,"size":100,"sort": [{"date": {"order": "desc"}}]} + page_data=requests.get(f'{args.horreum_host}/api/run/list/{test_id}',headers=headers,data=data) + page=page+100 + try: + json.loads(page_data) + except ValueError as e: + raise Exception(e) + page_data_values=json.loads(page_data) + page_data_len=len(page_data_values["runs"]) + if page_data_len==0: + print("INFO: No more Horreum test results to review, proceeding with upload") + break + for runs in page_data_values["runs"]: + test_run_id=runs["id"] + test_run_data=requests.get(f'{args.horreum_host}/api/run/{test_run_id}') + try: + json.loads(test_run_data) + except ValueError as e: + raise Exception(e) + test_run_data_values=json.loads(test_run_data) + test_run_matcher=test_run_data_values["data"][args.test_job_matcher] + if test_run_matcher=="null": + print(f'WARNING: Test run {test_run_id} for test {args.test_name_horreum}/{test_id} does not have {args.test_job_matcher}, skipping') + continue + if test_matcher==test_run_matcher: + raise Exception("INFO: Test result found in Horreum, skipping upload") - def result(self): + print("INFO: Uploading to Horreum ... ") + params={ + "test":args.test_name_horreum, + "start":test_start, + "stop":test_end, + "owner":args.test_owner, + "access":args.test_access + } + requests.post(f'{args.horreum_host}/api/run/data',params=params,headers=headers,data=json.dumps(values)) + + def result(self,args): print("Hello from pluginHorreum result") + if args.id_array == None: + raise Exception("Id array json file is needed to work with --horreum-result") + elif args.horreum_data_file == None: + raise Exception("Horreum data file is required to work with --horreum-result") + else: + values=json.loads(args.id_array) + is_fail=0 + for i in values: + id_value=i["id"] + jsonFile = open(args.horreum_data_file, 'r') + values = json.load(jsonFile) + jsonFile.close() + test_start=values["timestamp"] + if test_start == "None": + test_start=values["startTimestamp"] + test_end = values["endTimestamp"] + range_data = { + "range": { + "from": test_start, + "to": test_end, + "oneBeforeAndAfter": True + } + } + + # Create a dictionary with the annotation query + annotation_data = { + "annotation": { + "query": id_value + } + } + + # Write the range data to a JSON file + with open('/tmp/annotationQuery.json', 'w') as file: + json.dump(range_data, file) + # Append the annotation data to the JSON file + with open('/tmp/annotationQuery.json', 'a') as file: + json.dump(annotation_data, file) + + # Send a POST request to the API and retrieve the result using curl and jq + curl_command = f"curl https://{args.horreum_host}/api/changes/annotations -s -H 'content-type: application/json' -d @/tmp/annotationQuery.json | jq -r ." 
+ result = subprocess.check_output(curl_command, shell=True).decode('utf-8').strip() + + # Check if the result is not an empty list + if result != "[]": + is_fail = 1 + status_data.doit_set(args.horreum_data_file,{"result":"FAIL"}) + break + if is_fail != 1: + status_data.doit_set(args.horreum_data_file,{"result":"PASS"}) + @staticmethod def args(parser, group_actions): group_actions.add_argument( @@ -119,16 +275,80 @@ def args(parser, group_actions): title="horreum", description="Options needed to work with Horreum", ) - + group.add_argument( + "--horreum-data-file", + help="Data file to upload to Horreum" + ) + group.add_argument( + "--horreum-host", + help="Horreum host url" + ) + group.add_argument( + "--token", + help="Authorisation token" + ) + group.add_argument( + "--job-name", + help="Job name" + ) + group.add_argument( + "--test-name-horreum", + default="load-tests-result", + help="Test Name" + ) + group.add_argument( + "--test-job-matcher", + default="jobName" + ) + group.add_argument( + "--test-owner", + default="rhtap-perf-test-team" + ) + group.add_argument( + "--test-access", + default="PUBLIC" + ) + group.add_argument( + "--id-array", + help="Variable id array we wish to get timeseries data for" + ) class pluginResultsDashboard: def __init__(self, args): print("Hello from pluginResultsDashboard init") self.logger = logging.getLogger("opl.showel.pluginResultsDashboard") - def upload(self): + def upload(self,args): print("Hello from pluginResultsDashboard upload") - + if args.status_data is None: + raise Exception("Status data file is mandatory to work with --results-dashboard-upload") + elif args.es_host_url is None: + raise Exception("ES host url is required to work with --results-dashboard-upload") + elif args.group_name is None: + raise Exception("Group Name is mandatory to work with --results-dashboard-upload") + elif args.product_name is None: + raise Exception("Product Name is mandatory to work with --results-dashboard-upload") + elif args.test_name is None: + raise Exception("Test Name is mandatory to work with --results-dashboard-upload") + else: + jsonFile = open(args.status_data, 'r') + values = json.load(jsonFile) + jsonFile.close() + date=values["timestamp"] + link=values["jobLink"] + result=values["result"] + result_id=values["metadata"]["env"]["BUILD_ID"] + json_data=json.dumps({"query":{"bool":{"filter":[{"term":{"result_id.keyword":f"{result_id}"}}]}}}) + headers={ + "Content-Type": "application/json" + } + current_doc_in_es=requests.get(f'{args.es_host_url}/{args.dashboard_es_index}/_search',headers=headers,data=json_data) + if json.loads(current_doc_in_es.text)["hits"]["total"]["value"] == 0: + print("Uploading to results dashboard") + upload_data=json.dumps({"date":date,"group":args.group_name,"link":link,"product":args.product_name,"release":args.release,"result":result,"result_id":result_id,"test":args.test_name,"version":args.version}) + requests.post(f'{args.es_host_url}/{args.dashboard_es_index}/_doc',headers=headers,data=upload_data) + else: + print("INFO: Already in Results Dashboard ES, skipping upload") @staticmethod def args(parser, group_actions): group_actions.add_argument( @@ -139,13 +359,45 @@ def args(parser, group_actions): const=("resultsdashboard", "upload"), help="Upload file to Results Dashboard if not already there", ) - group = parser.add_argument_group( title="resultsdashboard", description="Options needed to work with Results Dashboard", ) - - + group.add_argument( + "--es-host-url", + help="Elastic search host url", + ) + 
group.add_argument( + "--dashboard-es-index", + default="results-dashboard-data", + help="Elastic search index where the result is stored", + ) + group.add_argument( + "--status-data", + help="File where we maintain metadata, results, parameters and measurements for this test run", + ) + group.add_argument( + "--group-name", + help="Name of the group where the product belongs" + ) + group.add_argument( + "--product-name", + help="Name of the Product" + ) + group.add_argument( + "--release", + default="latest", + help="Type of release of Product for e.g latest,nightly,weekly", + ) + group.add_argument( + "--test-name", + help="Name of the CPT test" + ) + group.add_argument( + "--version", + default="1", + help="Version of the product on which the test ran" + ) PLUGINS = { "prow": pluginProw, "opensearch": pluginOpenSearch, @@ -174,4 +426,4 @@ def main(): ) plugin_object = PLUGINS[plugin_name] plugin_instance = plugin_object(args) - getattr(plugin_instance, function_name)() + getattr(plugin_instance, function_name)(args) From e25425701d027933a4e2857ae08cc8a2abf8a8ad Mon Sep 17 00:00:00 2001 From: shubham-html-css-js Date: Fri, 12 Jan 2024 11:39:55 +0530 Subject: [PATCH 13/19] Applied black --- core/opl/shovel.py | 368 +++++++++++++++++++++++++-------------------- 1 file changed, 209 insertions(+), 159 deletions(-) diff --git a/core/opl/shovel.py b/core/opl/shovel.py index b12c11d..216c4b8 100755 --- a/core/opl/shovel.py +++ b/core/opl/shovel.py @@ -5,27 +5,26 @@ import subprocess import os -from . import skelet,status_data +from . import skelet, status_data class pluginProw: def __init__(self, args): print("Hello from pluginProw init 2") self.logger = logging.getLogger("opl.showel.pluginProw") - def list(self): print("Hi from pluginProw list") - def download(self,args): - from_url = f'{args.prow_base_url}/{args.prow_job_name}/{args.prow_test_name}/{args.prow_artifact_path}' - to_path = f'{args.prow_data_file}' - if not os.path.isfile(to_path): - print(f"INFO: Downloading {from_url} ... ", end="") - subprocess.run(["curl", "-Ssl", "-o", to_path, from_url], check=True) - print("DONE") - else: - print(f"DEBUG: File {to_path} already present, skipping download") + def download(self, args): + from_url = f"{args.prow_base_url}/{args.prow_job_name}/{args.prow_test_name}/{args.prow_artifact_path}" + to_path = f"{args.prow_data_file}" + if not os.path.isfile(to_path): + print(f"INFO: Downloading {from_url} ... 
", end="") + subprocess.run(["curl", "-Ssl", "-o", to_path, from_url], check=True) + print("DONE") + else: + print(f"DEBUG: File {to_path} already present, skipping download") @staticmethod def args(parser, group_actions): @@ -70,36 +69,42 @@ def args(parser, group_actions): default="redhat-appstudio-load-test/artifacts/load-tests.json", help="Path to the artifact", ) - group.add_argument( - "--prow-data-file", - help="prow data jdon file" - ) + group.add_argument("--prow-data-file", help="prow data jdon file") + class pluginOpenSearch: def __init__(self, args): print("Hello from pluginOpenSearch init") self.logger = logging.getLogger("opl.showel.pluginOpenSearch") - def upload(self,args): + def upload(self, args): print("Hello from pluginOpenSearch upload") if args.data_file == None: raise Exception("A Data file is needed to work with --opensearch-upload") elif args.end_timestamp == None: raise Exception("End timestamp is needed to work with --opensearch-upload") else: - json_data=json.dumps({"query":{"match":{"endTimestamp":args.end_timestamp}}}) - headers={ - "Content-Type": "application/json" - } - jsonFile = open(args.data_file, 'r') + json_data = json.dumps( + {"query": {"match": {"endTimestamp": args.end_timestamp}}} + ) + headers = {"Content-Type": "application/json"} + jsonFile = open(args.data_file, "r") values = json.load(jsonFile) - current_doc_in_es=requests.get(f'{args.es_host_url}/{args.es_index}/_search',headers=headers,data=json_data) + current_doc_in_es = requests.get( + f"{args.es_host_url}/{args.es_index}/_search", + headers=headers, + data=json_data, + ) if json.loads(current_doc_in_es.text)["hits"]["total"]["value"] == 0: print("Uploading to ES...") - requests.post(f'{args.es_host_url}/{args.es_index}/_doc',headers=headers,data=json.dumps(values)) + requests.post( + f"{args.es_host_url}/{args.es_index}/_doc", + headers=headers, + data=json.dumps(values), + ) else: print("INFO: Already in ES, skipping upload") - + @staticmethod def args(parser, group_actions): group_actions.add_argument( @@ -118,140 +123,176 @@ def args(parser, group_actions): group.add_argument( "--es-index", default="rhtap-ci-status-data", - help="Elastic search index where the data will be stored" - ) - group.add_argument( - "--data-file", - help="json file to upload to elastic search" - ) - group.add_argument( - "--end-timestamp", - help="timestamp when the test ended" + help="Elastic search index where the data will be stored", ) + group.add_argument("--data-file", help="json file to upload to elastic search") + group.add_argument("--end-timestamp", help="timestamp when the test ended") + class pluginHorreum: def __init__(self, args): print("Hello from pluginHorreum init") self.logger = logging.getLogger("opl.showel.pluginHorreum") - def upload(self,args): + def upload(self, args): if args.horreum_data_file == None: - raise Exception("Horreum data file is required to work with --horreum-upload") + raise Exception( + "Horreum data file is required to work with --horreum-upload" + ) elif args.horreum_host == None: raise Exception("Horreum host is required to work with --horreum-upload") elif args.token == None: - raise Exception("Authorisation Token is required to work with --horreum-upload") - elif args.job_name== None: + raise Exception( + "Authorisation Token is required to work with --horreum-upload" + ) + elif args.job_name == None: raise Exception("Job name is required to work with --horreum-upload") else: - headers={ + headers = { "Content-Type": "application/json", - "Authorization": 
f'Bearer {args.token}' + "Authorization": f"Bearer {args.token}", } - jsonFile = open(args.horreum_data_file, 'r') + jsonFile = open(args.horreum_data_file, "r") values = json.load(jsonFile) jsonFile.close() - test_matcher=values[args.test_job_matcher] - test_start=values["timestamp"] + test_matcher = values[args.test_job_matcher] + test_start = values["timestamp"] if test_start == "None": - test_start=values["startTimestamp"] + test_start = values["startTimestamp"] test_end = values["endTimestamp"] - if not test_start or not test_end or test_start == "None" or test_end == "None": - raise Exception("ERROR: We need start and end time in the JSON we are supposed to upload") - response = requests.get(f'{args.horreum_host}/api/test/byName/{args.test_name_horreum}',headers=headers) + if ( + not test_start + or not test_end + or test_start == "None" + or test_end == "None" + ): + raise Exception( + "ERROR: We need start and end time in the JSON we are supposed to upload" + ) + response = requests.get( + f"{args.horreum_host}/api/test/byName/{args.test_name_horreum}", + headers=headers, + ) test_id = json.load(response.text)["id"] - page=0 + page = 0 while True: - data={"from":page,"size":100,"sort": [{"date": {"order": "desc"}}]} - page_data=requests.get(f'{args.horreum_host}/api/run/list/{test_id}',headers=headers,data=data) - page=page+100 + data = { + "from": page, + "size": 100, + "sort": [{"date": {"order": "desc"}}], + } + page_data = requests.get( + f"{args.horreum_host}/api/run/list/{test_id}", + headers=headers, + data=data, + ) + page = page + 100 try: json.loads(page_data) except ValueError as e: raise Exception(e) - page_data_values=json.loads(page_data) - page_data_len=len(page_data_values["runs"]) - if page_data_len==0: - print("INFO: No more Horreum test results to review, proceeding with upload") + page_data_values = json.loads(page_data) + page_data_len = len(page_data_values["runs"]) + if page_data_len == 0: + print( + "INFO: No more Horreum test results to review, proceeding with upload" + ) break for runs in page_data_values["runs"]: - test_run_id=runs["id"] - test_run_data=requests.get(f'{args.horreum_host}/api/run/{test_run_id}') + test_run_id = runs["id"] + test_run_data = requests.get( + f"{args.horreum_host}/api/run/{test_run_id}" + ) try: json.loads(test_run_data) except ValueError as e: raise Exception(e) - test_run_data_values=json.loads(test_run_data) - test_run_matcher=test_run_data_values["data"][args.test_job_matcher] - if test_run_matcher=="null": - print(f'WARNING: Test run {test_run_id} for test {args.test_name_horreum}/{test_id} does not have {args.test_job_matcher}, skipping') + test_run_data_values = json.loads(test_run_data) + test_run_matcher = test_run_data_values["data"][ + args.test_job_matcher + ] + if test_run_matcher == "null": + print( + f"WARNING: Test run {test_run_id} for test {args.test_name_horreum}/{test_id} does not have {args.test_job_matcher}, skipping" + ) continue - if test_matcher==test_run_matcher: - raise Exception("INFO: Test result found in Horreum, skipping upload") + if test_matcher == test_run_matcher: + raise Exception( + "INFO: Test result found in Horreum, skipping upload" + ) print("INFO: Uploading to Horreum ... 
") - params={ - "test":args.test_name_horreum, - "start":test_start, - "stop":test_end, - "owner":args.test_owner, - "access":args.test_access + params = { + "test": args.test_name_horreum, + "start": test_start, + "stop": test_end, + "owner": args.test_owner, + "access": args.test_access, } - requests.post(f'{args.horreum_host}/api/run/data',params=params,headers=headers,data=json.dumps(values)) + requests.post( + f"{args.horreum_host}/api/run/data", + params=params, + headers=headers, + data=json.dumps(values), + ) - def result(self,args): + def result(self, args): print("Hello from pluginHorreum result") if args.id_array == None: - raise Exception("Id array json file is needed to work with --horreum-result") + raise Exception( + "Id array json file is needed to work with --horreum-result" + ) elif args.horreum_data_file == None: - raise Exception("Horreum data file is required to work with --horreum-result") + raise Exception( + "Horreum data file is required to work with --horreum-result" + ) else: - values=json.loads(args.id_array) - is_fail=0 + values = json.loads(args.id_array) + is_fail = 0 for i in values: - id_value=i["id"] - jsonFile = open(args.horreum_data_file, 'r') + id_value = i["id"] + jsonFile = open(args.horreum_data_file, "r") values = json.load(jsonFile) jsonFile.close() - test_start=values["timestamp"] + test_start = values["timestamp"] if test_start == "None": - test_start=values["startTimestamp"] + test_start = values["startTimestamp"] test_end = values["endTimestamp"] range_data = { - "range": { - "from": test_start, - "to": test_end, - "oneBeforeAndAfter": True - } - } + "range": { + "from": test_start, + "to": test_end, + "oneBeforeAndAfter": True, + } + } # Create a dictionary with the annotation query - annotation_data = { - "annotation": { - "query": id_value - } - } + annotation_data = {"annotation": {"query": id_value}} # Write the range data to a JSON file - with open('/tmp/annotationQuery.json', 'w') as file: + with open("/tmp/annotationQuery.json", "w") as file: json.dump(range_data, file) # Append the annotation data to the JSON file - with open('/tmp/annotationQuery.json', 'a') as file: + with open("/tmp/annotationQuery.json", "a") as file: json.dump(annotation_data, file) # Send a POST request to the API and retrieve the result using curl and jq curl_command = f"curl https://{args.horreum_host}/api/changes/annotations -s -H 'content-type: application/json' -d @/tmp/annotationQuery.json | jq -r ." 
- result = subprocess.check_output(curl_command, shell=True).decode('utf-8').strip() + result = ( + subprocess.check_output(curl_command, shell=True) + .decode("utf-8") + .strip() + ) # Check if the result is not an empty list if result != "[]": - is_fail = 1 - status_data.doit_set(args.horreum_data_file,{"result":"FAIL"}) - break + is_fail = 1 + status_data.doit_set(args.horreum_data_file, {"result": "FAIL"}) + break if is_fail != 1: - status_data.doit_set(args.horreum_data_file,{"result":"PASS"}) - + status_data.doit_set(args.horreum_data_file, {"result": "PASS"}) + @staticmethod def args(parser, group_actions): group_actions.add_argument( @@ -275,80 +316,94 @@ def args(parser, group_actions): title="horreum", description="Options needed to work with Horreum", ) + group.add_argument("--horreum-data-file", help="Data file to upload to Horreum") + group.add_argument("--horreum-host", help="Horreum host url") + group.add_argument("--token", help="Authorisation token") + group.add_argument("--job-name", help="Job name") group.add_argument( - "--horreum-data-file", - help="Data file to upload to Horreum" - ) - group.add_argument( - "--horreum-host", - help="Horreum host url" - ) - group.add_argument( - "--token", - help="Authorisation token" + "--test-name-horreum", default="load-tests-result", help="Test Name" ) + group.add_argument("--test-job-matcher", default="jobName") + group.add_argument("--test-owner", default="rhtap-perf-test-team") + group.add_argument("--test-access", default="PUBLIC") group.add_argument( - "--job-name", - help="Job name" - ) - group.add_argument( - "--test-name-horreum", - default="load-tests-result", - help="Test Name" - ) - group.add_argument( - "--test-job-matcher", - default="jobName" - ) - group.add_argument( - "--test-owner", - default="rhtap-perf-test-team" - ) - group.add_argument( - "--test-access", - default="PUBLIC" - ) - group.add_argument( - "--id-array", - help="Variable id array we wish to get timeseries data for" + "--id-array", help="Variable id array we wish to get timeseries data for" ) + class pluginResultsDashboard: def __init__(self, args): print("Hello from pluginResultsDashboard init") self.logger = logging.getLogger("opl.showel.pluginResultsDashboard") - def upload(self,args): + def upload(self, args): print("Hello from pluginResultsDashboard upload") if args.status_data is None: - raise Exception("Status data file is mandatory to work with --results-dashboard-upload") + raise Exception( + "Status data file is mandatory to work with --results-dashboard-upload" + ) elif args.es_host_url is None: - raise Exception("ES host url is required to work with --results-dashboard-upload") + raise Exception( + "ES host url is required to work with --results-dashboard-upload" + ) elif args.group_name is None: - raise Exception("Group Name is mandatory to work with --results-dashboard-upload") + raise Exception( + "Group Name is mandatory to work with --results-dashboard-upload" + ) elif args.product_name is None: - raise Exception("Product Name is mandatory to work with --results-dashboard-upload") + raise Exception( + "Product Name is mandatory to work with --results-dashboard-upload" + ) elif args.test_name is None: - raise Exception("Test Name is mandatory to work with --results-dashboard-upload") + raise Exception( + "Test Name is mandatory to work with --results-dashboard-upload" + ) else: - jsonFile = open(args.status_data, 'r') + jsonFile = open(args.status_data, "r") values = json.load(jsonFile) jsonFile.close() - date=values["timestamp"] - 
link=values["jobLink"] - result=values["result"] - result_id=values["metadata"]["env"]["BUILD_ID"] - json_data=json.dumps({"query":{"bool":{"filter":[{"term":{"result_id.keyword":f"{result_id}"}}]}}}) - headers={ - "Content-Type": "application/json" - } - current_doc_in_es=requests.get(f'{args.es_host_url}/{args.dashboard_es_index}/_search',headers=headers,data=json_data) + date = values["timestamp"] + link = values["jobLink"] + result = values["result"] + result_id = values["metadata"]["env"]["BUILD_ID"] + json_data = json.dumps( + { + "query": { + "bool": { + "filter": [{"term": {"result_id.keyword": f"{result_id}"}}] + } + } + } + ) + headers = {"Content-Type": "application/json"} + current_doc_in_es = requests.get( + f"{args.es_host_url}/{args.dashboard_es_index}/_search", + headers=headers, + data=json_data, + ) if json.loads(current_doc_in_es.text)["hits"]["total"]["value"] == 0: print("Uploading to results dashboard") - upload_data=json.dumps({"date":date,"group":args.group_name,"link":link,"product":args.product_name,"release":args.release,"result":result,"result_id":result_id,"test":args.test_name,"version":args.version}) - requests.post(f'{args.es_host_url}/{args.dashboard_es_index}/_doc',headers=headers,data=upload_data) + upload_data = json.dumps( + { + "date": date, + "group": args.group_name, + "link": link, + "product": args.product_name, + "release": args.release, + "result": result, + "result_id": result_id, + "test": args.test_name, + "version": args.version, + } + ) + requests.post( + f"{args.es_host_url}/{args.dashboard_es_index}/_doc", + headers=headers, + data=upload_data, + ) else: print("INFO: Already in Results Dashboard ES, skipping upload") + @staticmethod def args(parser, group_actions): group_actions.add_argument( @@ -377,27 +432,22 @@ def args(parser, group_actions): help="File where we maintain metadata, results, parameters and measurements for this test run", ) group.add_argument( - "--group-name", - help="Name of the group where the product belongs" - ) - group.add_argument( - "--product-name", - help="Name of the Product" + "--group-name", help="Name of the group where the product belongs" ) + group.add_argument("--product-name", help="Name of the Product") group.add_argument( "--release", default="latest", help="Type of release of Product for e.g latest,nightly,weekly", ) - group.add_argument( - "--test-name", - help="Name of the CPT test" - ) + group.add_argument("--test-name", help="Name of the CPT test") group.add_argument( "--version", default="1", - help="Version of the product on which the test ran" + help="Version of the product on which the test ran", ) + + PLUGINS = { "prow": pluginProw, "opensearch": pluginOpenSearch, From 0e3449985c59c4bee5c58351ba9d079f6cfbb864 Mon Sep 17 00:00:00 2001 From: shubham-html-css-js Date: Fri, 12 Jan 2024 11:44:57 +0530 Subject: [PATCH 14/19] Fix python flake errors --- core/opl/shovel.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/core/opl/shovel.py b/core/opl/shovel.py index 216c4b8..9c86903 100755 --- a/core/opl/shovel.py +++ b/core/opl/shovel.py @@ -79,9 +79,9 @@ def __init__(self, args): def upload(self, args): print("Hello from pluginOpenSearch upload") - if args.data_file == None: + if args.data_file is None: raise Exception("A Data file is needed to work with --opensearch-upload") - elif args.end_timestamp == None: + elif args.end_timestamp is None: raise Exception("End timestamp is needed to work with --opensearch-upload") else: json_data = json.dumps( @@ -135,17 
+135,17 @@ def __init__(self, args): self.logger = logging.getLogger("opl.showel.pluginHorreum") def upload(self, args): - if args.horreum_data_file == None: + if args.horreum_data_file is None: raise Exception( "Horreum data file is required to work with --horreum-upload" ) - elif args.horreum_host == None: + elif args.horreum_host is None: raise Exception("Horreum host is required to work with --horreum-upload") - elif args.token == None: + elif args.token is None: raise Exception( "Authorisation Token is required to work with --horreum-upload" ) - elif args.job_name == None: + elif args.job_name is None: raise Exception("Job name is required to work with --horreum-upload") else: headers = { @@ -238,11 +238,11 @@ def upload(self, args): def result(self, args): print("Hello from pluginHorreum result") - if args.id_array == None: + if args.id_array is None: raise Exception( "Id array json file is needed to work with --horreum-result" ) - elif args.horreum_data_file == None: + elif args.horreum_data_file is None: raise Exception( "Horreum data file is required to work with --horreum-result" ) From ae6878791d835f55cba333bc30d04706c5941740 Mon Sep 17 00:00:00 2001 From: shubham-html-css-js Date: Mon, 15 Jan 2024 17:52:00 +0530 Subject: [PATCH 15/19] Review comment changes --- core/opl/shovel.py | 279 +++++++++++++++++++++------------------------ 1 file changed, 127 insertions(+), 152 deletions(-) diff --git a/core/opl/shovel.py b/core/opl/shovel.py index 9c86903..8852757 100755 --- a/core/opl/shovel.py +++ b/core/opl/shovel.py @@ -4,27 +4,37 @@ import json import subprocess import os +import re from . import skelet, status_data class pluginProw: def __init__(self, args): - print("Hello from pluginProw init 2") self.logger = logging.getLogger("opl.showel.pluginProw") + self.args = args def list(self): - print("Hi from pluginProw list") - - def download(self, args): - from_url = f"{args.prow_base_url}/{args.prow_job_name}/{args.prow_test_name}/{args.prow_artifact_path}" - to_path = f"{args.prow_data_file}" + response = requests.get(f"{self.args.prow_base_url}/{self.args.prow_job_name}") + # Extract 19-digit numbers using regular expression + numbers = re.findall(r"\b[0-9]{19}\b", response) + + # Sort the numbers in natural order and get the last 10 unique numbers + sorted_numbers = sorted(set(numbers), key=lambda x: int(x)) + last_10_numbers = sorted_numbers[-10:] + return last_10_numbers + + def download(self): + from_url = f"{self.args.prow_base_url}/{self.args.prow_job_name}/{self.args.prow_test_name}/{self.args.prow_artifact_path}" + to_path = f"{self.args.prow_data_file}" if not os.path.isfile(to_path): - print(f"INFO: Downloading {from_url} ... ", end="") - subprocess.run(["curl", "-Ssl", "-o", to_path, from_url], check=True) - print("DONE") + logging.info(f"INFO: Downloading {from_url} ... 
", end="") + response = requests.get(f"{from_url}") + with open(to_path, "w") as f: + f.write(response.content) + logging.info("DONE") else: - print(f"DEBUG: File {to_path} already present, skipping download") + logging.info(f"DEBUG: File {to_path} already present, skipping download") @staticmethod def args(parser, group_actions): @@ -69,41 +79,50 @@ def args(parser, group_actions): default="redhat-appstudio-load-test/artifacts/load-tests.json", help="Path to the artifact", ) - group.add_argument("--prow-data-file", help="prow data jdon file") + group.add_argument("--prow-data-file", help="prow data json file") class pluginOpenSearch: def __init__(self, args): - print("Hello from pluginOpenSearch init") self.logger = logging.getLogger("opl.showel.pluginOpenSearch") + self.args = args - def upload(self, args): - print("Hello from pluginOpenSearch upload") - if args.data_file is None: + def upload(self): + if self.args.data_file is None: raise Exception("A Data file is needed to work with --opensearch-upload") - elif args.end_timestamp is None: - raise Exception("End timestamp is needed to work with --opensearch-upload") + elif self.args.matcher_field is None: + raise Exception("Matcher field is needed to work with --opensearch-upload") + elif self.args.matcher_field_value is None: + raise Exception( + "Matcher field value is needed to work with --opensearch-upload" + ) else: json_data = json.dumps( - {"query": {"match": {"endTimestamp": args.end_timestamp}}} + { + "query": { + "match": { + f"{self.args.matcher_field}": self.args.matcher_field_value + } + } + } ) headers = {"Content-Type": "application/json"} - jsonFile = open(args.data_file, "r") + jsonFile = open(self.args.data_file, "r") values = json.load(jsonFile) current_doc_in_es = requests.get( - f"{args.es_host_url}/{args.es_index}/_search", + f"{self.args.es_host_url}/{self.args.es_index}/_search", headers=headers, data=json_data, ) if json.loads(current_doc_in_es.text)["hits"]["total"]["value"] == 0: - print("Uploading to ES...") + logging.info("Uploading to ES...") requests.post( - f"{args.es_host_url}/{args.es_index}/_doc", + f"{self.args.es_host_url}/{self.args.es_index}/_doc", headers=headers, data=json.dumps(values), ) else: - print("INFO: Already in ES, skipping upload") + logging.info("INFO: Already in ES, skipping upload") @staticmethod def args(parser, group_actions): @@ -126,172 +145,127 @@ def args(parser, group_actions): help="Elastic search index where the data will be stored", ) group.add_argument("--data-file", help="json file to upload to elastic search") - group.add_argument("--end-timestamp", help="timestamp when the test ended") + group.add_argument( + "--matcher-field", + help="json field which will be used for checking if data exists in ES or not", + ) + group.add_argument("--matcher-field-value", help="value of the matcher field") class pluginHorreum: def __init__(self, args): - print("Hello from pluginHorreum init") self.logger = logging.getLogger("opl.showel.pluginHorreum") + self.args = args - def upload(self, args): - if args.horreum_data_file is None: + def upload(self): + if self.args.horreum_data_file is None: raise Exception( "Horreum data file is required to work with --horreum-upload" ) - elif args.horreum_host is None: + elif self.args.horreum_host is None: raise Exception("Horreum host is required to work with --horreum-upload") - elif args.token is None: + elif self.args.token is None: raise Exception( "Authorisation Token is required to work with --horreum-upload" ) - elif args.job_name is None: 
+ elif self.args.job_name is None: raise Exception("Job name is required to work with --horreum-upload") + elif self.args.test_start is None: + raise Exception("Test start is required to work with --horreum-upload") + elif self.args.test_end is None: + raise Exception("Test end is required to work with --horreum-upload") else: headers = { "Content-Type": "application/json", - "Authorization": f"Bearer {args.token}", + "Authorization": f"Bearer {self.args.token}", } - jsonFile = open(args.horreum_data_file, "r") + jsonFile = open(self.args.horreum_data_file, "r") values = json.load(jsonFile) jsonFile.close() - test_matcher = values[args.test_job_matcher] - test_start = values["timestamp"] - if test_start == "None": - test_start = values["startTimestamp"] - test_end = values["endTimestamp"] - if ( - not test_start - or not test_end - or test_start == "None" - or test_end == "None" - ): - raise Exception( - "ERROR: We need start and end time in the JSON we are supposed to upload" - ) + test_matcher = values[self.args.test_job_matcher] response = requests.get( - f"{args.horreum_host}/api/test/byName/{args.test_name_horreum}", + f"{self.args.horreum_host}/api/test/byName/{self.args.test_name_horreum}", headers=headers, ) test_id = json.load(response.text)["id"] - page = 0 - while True: - data = { - "from": page, - "size": 100, - "sort": [{"date": {"order": "desc"}}], - } - page_data = requests.get( - f"{args.horreum_host}/api/run/list/{test_id}", - headers=headers, - data=data, + filter_data = {f"{self.args.test_job_matcher}": f"{test_matcher}"} + response = requests.get( + f"{self.args.horreum_host}/api/dataset/list/{test_id}", + headers=headers, + params={"filter": json.dumps(filter_data)}, + ) + datasets = response.json().get("datasets", []) + if len(datasets) > 0: + raise Exception( + f"Test result {self.args.test_job_matcher}={test_matcher} found in Horreum {datasets}, skipping upload" ) - page = page + 100 - try: - json.loads(page_data) - except ValueError as e: - raise Exception(e) - page_data_values = json.loads(page_data) - page_data_len = len(page_data_values["runs"]) - if page_data_len == 0: - print( - "INFO: No more Horreum test results to review, proceeding with upload" - ) - break - for runs in page_data_values["runs"]: - test_run_id = runs["id"] - test_run_data = requests.get( - f"{args.horreum_host}/api/run/{test_run_id}" - ) - try: - json.loads(test_run_data) - except ValueError as e: - raise Exception(e) - test_run_data_values = json.loads(test_run_data) - test_run_matcher = test_run_data_values["data"][ - args.test_job_matcher - ] - if test_run_matcher == "null": - print( - f"WARNING: Test run {test_run_id} for test {args.test_name_horreum}/{test_id} does not have {args.test_job_matcher}, skipping" - ) - continue - if test_matcher == test_run_matcher: - raise Exception( - "INFO: Test result found in Horreum, skipping upload" - ) - - print("INFO: Uploading to Horreum ... ") + logging.info("INFO: Uploading to Horreum ... 
") params = { - "test": args.test_name_horreum, - "start": test_start, - "stop": test_end, - "owner": args.test_owner, - "access": args.test_access, + "test": self.args.test_name_horreum, + "start": self.args.test_start, + "stop": self.args.test_end, + "owner": self.args.test_owner, + "access": self.args.test_access, } requests.post( - f"{args.horreum_host}/api/run/data", + f"{self.args.horreum_host}/api/run/data", params=params, headers=headers, data=json.dumps(values), ) - def result(self, args): - print("Hello from pluginHorreum result") - if args.id_array is None: + def result(self): + if self.args.id_array is None: raise Exception( "Id array json file is needed to work with --horreum-result" ) - elif args.horreum_data_file is None: + elif self.args.horreum_data_file is None: raise Exception( "Horreum data file is required to work with --horreum-result" ) + elif self.args.test_start is None: + raise Exception("Test start is required to work with --horreum-result") + elif self.args.test_end is None: + raise Exception("Test end is required to work with --horreum-result") + elif self.args.test_id is None: + raise Exception("Test id is required to work with --horreum-result") else: - values = json.loads(args.id_array) + values = requests.get( + f"https://{self.args.horreum_host}/api/alerting/variables", + params={"test": self.args.test_id}, + ) + id_array = values.json() is_fail = 0 - for i in values: + for i in id_array: id_value = i["id"] - jsonFile = open(args.horreum_data_file, "r") + jsonFile = open(self.args.horreum_data_file, "r") values = json.load(jsonFile) jsonFile.close() - test_start = values["timestamp"] - if test_start == "None": - test_start = values["startTimestamp"] - test_end = values["endTimestamp"] range_data = { "range": { - "from": test_start, - "to": test_end, + "from": self.args.test_start, + "to": self.args.test_end, "oneBeforeAndAfter": True, - } + }, + "annotation": {"query": id_value}, } - # Create a dictionary with the annotation query - annotation_data = {"annotation": {"query": id_value}} - - # Write the range data to a JSON file - with open("/tmp/annotationQuery.json", "w") as file: - json.dump(range_data, file) - - # Append the annotation data to the JSON file - with open("/tmp/annotationQuery.json", "a") as file: - json.dump(annotation_data, file) - # Send a POST request to the API and retrieve the result using curl and jq - curl_command = f"curl https://{args.horreum_host}/api/changes/annotations -s -H 'content-type: application/json' -d @/tmp/annotationQuery.json | jq -r ." 
- result = ( - subprocess.check_output(curl_command, shell=True) - .decode("utf-8") - .strip() + result = requests.get( + f"https://{self.args.horreum_host}/api/changes/annotations", + headers={"content-type: application/json"}, + data=json.dumps(range_data), ) # Check if the result is not an empty list if result != "[]": is_fail = 1 - status_data.doit_set(args.horreum_data_file, {"result": "FAIL"}) + status_data.doit_set( + self.args.horreum_data_file, {"result": "FAIL"} + ) break if is_fail != 1: - status_data.doit_set(args.horreum_data_file, {"result": "PASS"}) + status_data.doit_set(self.args.horreum_data_file, {"result": "PASS"}) @staticmethod def args(parser, group_actions): @@ -326,40 +300,41 @@ def args(parser, group_actions): group.add_argument("--test-job-matcher", default="jobName") group.add_argument("--test-owner", default="rhtap-perf-test-team") group.add_argument("--test-access", default="PUBLIC") + group.add_argument("--test-start", help="time when the test started") + group.add_argument("--test-end", help="time when the test ended") group.add_argument( - "--id-array", help="Variable id array we wish to get timeseries data for" + "--test-id", help="Id of the test for which we want to check Pass or Fail" ) class pluginResultsDashboard: def __init__(self, args): - print("Hello from pluginResultsDashboard init") self.logger = logging.getLogger("opl.showel.pluginResultsDashboard") + self.args = args - def upload(self, args): - print("Hello from pluginResultsDashboard upload") - if args.status_data is None: + def upload(self): + if self.args.status_data is None: raise Exception( "Status data file is mandatory to work with --results-dashboard-upload" ) - elif args.es_host_url is None: + elif self.args.es_host_url is None: raise Exception( "ES host url is required to work with --results-dashboard-upload" ) - elif args.group_name is None: + elif self.args.group_name is None: raise Exception( "Group Name is mandatory to work with --results-dashboard-upload" ) - elif args.product_name is None: + elif self.args.product_name is None: raise Exception( "Product Name is mandatory to work with --results-dashboard-upload" ) - elif args.test_name is None: + elif self.args.test_name is None: raise Exception( "Test Name is mandatory to work with --results-dashboard-upload" ) else: - jsonFile = open(args.status_data, "r") + jsonFile = open(self.args.status_data, "r") values = json.load(jsonFile) jsonFile.close() date = values["timestamp"] @@ -377,32 +352,32 @@ def upload(self, args): ) headers = {"Content-Type": "application/json"} current_doc_in_es = requests.get( - f"{args.es_host_url}/{args.dashboard_es_index}/_search", + f"{self.args.es_host_url}/{self.args.dashboard_es_index}/_search", headers=headers, data=json_data, ) if json.loads(current_doc_in_es.text)["hits"]["total"]["value"] == 0: - print("Uploading to results dashboard") + logging.info("Uploading to results dashboard") upload_data = json.dumps( { "date": date, - "group": args.group_name, + "group": self.args.group_name, "link": link, - "product": args.product_name, - "release": args.release, + "product": self.args.product_name, + "release": self.args.release, "result": result, "result_id": result_id, - "test": args.test_name, - "version": args.version, + "test": self.args.test_name, + "version": self.args.version, } ) requests.post( - f"{args.es_host_url}/{args.dashboard_es_index}/_doc", + f"{self.args.es_host_url}/{self.args.dashboard_es_index}/_doc", headers=headers, data=upload_data, ) else: - print("INFO: Already in Results 
Dashboard ES, skipping upload") + logging.info("INFO: Already in Results Dashboard ES, skipping upload") @staticmethod def args(parser, group_actions): @@ -476,4 +451,4 @@ def main(): ) plugin_object = PLUGINS[plugin_name] plugin_instance = plugin_object(args) - getattr(plugin_instance, function_name)(args) + getattr(plugin_instance, function_name)() From 29355dcc366e88654bbfcb6243093cad8b6c0a86 Mon Sep 17 00:00:00 2001 From: shubham-html-css-js Date: Mon, 15 Jan 2024 17:59:07 +0530 Subject: [PATCH 16/19] Removed unnecessary module --- core/opl/shovel.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/core/opl/shovel.py b/core/opl/shovel.py index 8852757..7b6d1fc 100755 --- a/core/opl/shovel.py +++ b/core/opl/shovel.py @@ -2,7 +2,6 @@ import logging import requests import json -import subprocess import os import re @@ -17,7 +16,7 @@ def __init__(self, args): def list(self): response = requests.get(f"{self.args.prow_base_url}/{self.args.prow_job_name}") # Extract 19-digit numbers using regular expression - numbers = re.findall(r"\b[0-9]{19}\b", response) + numbers = re.findall(r"\b[0-9]{19}\b", response.json()) # Sort the numbers in natural order and get the last 10 unique numbers sorted_numbers = sorted(set(numbers), key=lambda x: int(x)) From e67aa089a461b2bb3f2bd54b98a848d97bd2ba82 Mon Sep 17 00:00:00 2001 From: shubham-html-css-js Date: Mon, 15 Jan 2024 19:36:55 +0530 Subject: [PATCH 17/19] fix pylint errors --- core/opl/shovel.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/core/opl/shovel.py b/core/opl/shovel.py index 7b6d1fc..57a5133 100755 --- a/core/opl/shovel.py +++ b/core/opl/shovel.py @@ -16,7 +16,7 @@ def __init__(self, args): def list(self): response = requests.get(f"{self.args.prow_base_url}/{self.args.prow_job_name}") # Extract 19-digit numbers using regular expression - numbers = re.findall(r"\b[0-9]{19}\b", response.json()) + numbers = re.findall(r"\b[0-9]{19}\b", response.text) # Sort the numbers in natural order and get the last 10 unique numbers sorted_numbers = sorted(set(numbers), key=lambda x: int(x)) @@ -36,7 +36,7 @@ def download(self): logging.info(f"DEBUG: File {to_path} already present, skipping download") @staticmethod - def args(parser, group_actions): + def set_args(parser, group_actions): group_actions.add_argument( "--prow-list", dest="actions", @@ -124,7 +124,7 @@ def upload(self): logging.info("INFO: Already in ES, skipping upload") @staticmethod - def args(parser, group_actions): + def set_args(parser, group_actions): group_actions.add_argument( "--opensearch-upload", dest="actions", @@ -267,7 +267,7 @@ def result(self): status_data.doit_set(self.args.horreum_data_file, {"result": "PASS"}) @staticmethod - def args(parser, group_actions): + def set_args(parser, group_actions): group_actions.add_argument( "--horreum-upload", dest="actions", @@ -379,7 +379,7 @@ def upload(self): logging.info("INFO: Already in Results Dashboard ES, skipping upload") @staticmethod - def args(parser, group_actions): + def set_args(parser, group_actions): group_actions.add_argument( "--resultsdashboard-upload", dest="actions", @@ -440,7 +440,7 @@ def main(): description="Various high level things you can do", ) for name, plugin in PLUGINS.items(): - plugin.args(parser, group_actions) + plugin.set_args(parser, group_actions) with skelet.test_setup(parser) as (args, status_data): logger = logging.getLogger("main") From f8dd214ded175f77195b7b15d1086c68c232ba23 Mon Sep 17 00:00:00 2001 From: spadakan Date: Mon, 15 Jan 2024 
17:27:09 -0500 Subject: [PATCH 18/19] fixing lint error --- opl/investigator/config.py | 8 ++++++-- opl/pass_or_fail.py | 9 +++++++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/opl/investigator/config.py b/opl/investigator/config.py index 7cd5cb2..c3c49ab 100644 --- a/opl/investigator/config.py +++ b/opl/investigator/config.py @@ -77,7 +77,9 @@ def load_config(conf, fp): conf.history_es_query = data["history"]["es_query"] if "es_server_user" in data["history"]: conf.history_es_server_user = data["history"]["es_server_user"] - conf.history_es_server_pass_env_var = data["history"]["es_server_pass_env_var"] + conf.history_es_server_pass_env_var = data["history"][ + "es_server_pass_env_var" + ] if "es_server_verify" in data["history"]: conf.history_es_server_verify = data["history"]["es_server_verify"] else: @@ -97,7 +99,9 @@ def load_config(conf, fp): conf.decisions_es_index = data["decisions"]["es_index"] if "es_server_user" in data["decisions"]: conf.decisions_es_server_user = data["decisions"]["es_server_user"] - conf.decisions_es_server_pass_env_var = data["decisions"]["es_server_pass_env_var"] + conf.decisions_es_server_pass_env_var = data["decisions"][ + "es_server_pass_env_var" + ] if "es_server_verify" in data["decisions"]: conf.decisions_es_server_verify = data["decisions"]["es_server_verify"] else: diff --git a/opl/pass_or_fail.py b/opl/pass_or_fail.py index 7b0c0c6..a52b116 100755 --- a/opl/pass_or_fail.py +++ b/opl/pass_or_fail.py @@ -114,7 +114,10 @@ def main(): if args.history_type == "csv": history = opl.investigator.csv_loader.load(args.history_file, args.sets) elif args.history_type == "elasticsearch": - if hasattr(args, "history_es_server_verify") and not args.history_es_server_verify: + if ( + hasattr(args, "history_es_server_verify") + and not args.history_es_server_verify + ): # SSL verification is disabled by default opl.http.insecure() history = opl.investigator.elasticsearch_loader.load( @@ -123,7 +126,9 @@ def main(): args.history_es_query, args.sets, es_server_user=getattr(args, "history_es_server_user", None), - es_server_pass_env_var=getattr(args, "history_es_server_pass_env_var", None), + es_server_pass_env_var=getattr( + args, "history_es_server_pass_env_var", None + ), ) elif args.history_type == "sd_dir": From 610dfe435e2c454bbe13af83ce7418f7fa657ec1 Mon Sep 17 00:00:00 2001 From: Sarath Padakandla Date: Tue, 16 Jan 2024 06:17:20 -0500 Subject: [PATCH 19/19] Delete core/opl/shovel.py --- core/opl/shovel.py | 453 --------------------------------------------- 1 file changed, 453 deletions(-) delete mode 100755 core/opl/shovel.py diff --git a/core/opl/shovel.py b/core/opl/shovel.py deleted file mode 100755 index 57a5133..0000000 --- a/core/opl/shovel.py +++ /dev/null @@ -1,453 +0,0 @@ -import argparse -import logging -import requests -import json -import os -import re - -from . 
import skelet, status_data - - -class pluginProw: - def __init__(self, args): - self.logger = logging.getLogger("opl.showel.pluginProw") - self.args = args - - def list(self): - response = requests.get(f"{self.args.prow_base_url}/{self.args.prow_job_name}") - # Extract 19-digit numbers using regular expression - numbers = re.findall(r"\b[0-9]{19}\b", response.text) - - # Sort the numbers in natural order and get the last 10 unique numbers - sorted_numbers = sorted(set(numbers), key=lambda x: int(x)) - last_10_numbers = sorted_numbers[-10:] - return last_10_numbers - - def download(self): - from_url = f"{self.args.prow_base_url}/{self.args.prow_job_name}/{self.args.prow_test_name}/{self.args.prow_artifact_path}" - to_path = f"{self.args.prow_data_file}" - if not os.path.isfile(to_path): - logging.info(f"INFO: Downloading {from_url} ... ", end="") - response = requests.get(f"{from_url}") - with open(to_path, "w") as f: - f.write(response.content) - logging.info("DONE") - else: - logging.info(f"DEBUG: File {to_path} already present, skipping download") - - @staticmethod - def set_args(parser, group_actions): - group_actions.add_argument( - "--prow-list", - dest="actions", - default=[], - action="append_const", - const=("prow", "list"), - help="List runs for specific Prow run", - ) - group_actions.add_argument( - "--prow-download", - dest="actions", - default=[], - action="append_const", - const=("prow", "download"), - help="Download file from Prow run artifacts", - ) - - group = parser.add_argument_group( - title="prow", - description="Options needed to work with Prow", - ) - group.add_argument( - "--prow-base-url", - default="https://gcsweb-ci.apps.ci.l2s4.p1.openshiftapps.com/gcs/origin-ci-test/logs/", - help="Base URL", - ) - group.add_argument( - "--prow-job-name", - default="periodic-ci-redhat-appstudio-e2e-tests-load-test-ci-daily-10u-10t", - help="Job name as available in ci-operator/jobs/...", - ) - group.add_argument( - "--prow-test-name", - default="load-test-ci-daily-10u-10t", - help="Test name as configured in ci-operator/config/...", - ) - group.add_argument( - "--prow-artifact-path", - default="redhat-appstudio-load-test/artifacts/load-tests.json", - help="Path to the artifact", - ) - group.add_argument("--prow-data-file", help="prow data json file") - - -class pluginOpenSearch: - def __init__(self, args): - self.logger = logging.getLogger("opl.showel.pluginOpenSearch") - self.args = args - - def upload(self): - if self.args.data_file is None: - raise Exception("A Data file is needed to work with --opensearch-upload") - elif self.args.matcher_field is None: - raise Exception("Matcher field is needed to work with --opensearch-upload") - elif self.args.matcher_field_value is None: - raise Exception( - "Matcher field value is needed to work with --opensearch-upload" - ) - else: - json_data = json.dumps( - { - "query": { - "match": { - f"{self.args.matcher_field}": self.args.matcher_field_value - } - } - } - ) - headers = {"Content-Type": "application/json"} - jsonFile = open(self.args.data_file, "r") - values = json.load(jsonFile) - current_doc_in_es = requests.get( - f"{self.args.es_host_url}/{self.args.es_index}/_search", - headers=headers, - data=json_data, - ) - if json.loads(current_doc_in_es.text)["hits"]["total"]["value"] == 0: - logging.info("Uploading to ES...") - requests.post( - f"{self.args.es_host_url}/{self.args.es_index}/_doc", - headers=headers, - data=json.dumps(values), - ) - else: - logging.info("INFO: Already in ES, skipping upload") - - @staticmethod - def 
set_args(parser, group_actions): - group_actions.add_argument( - "--opensearch-upload", - dest="actions", - default=[], - action="append_const", - const=("opensearch", "upload"), - help="Upload file to OpenSearch if not already there", - ) - - group = parser.add_argument_group( - title="opensearch", - description="Options needed to work with OpenSearch", - ) - group.add_argument( - "--es-index", - default="rhtap-ci-status-data", - help="Elastic search index where the data will be stored", - ) - group.add_argument("--data-file", help="json file to upload to elastic search") - group.add_argument( - "--matcher-field", - help="json field which will be used for checking if data exists in ES or not", - ) - group.add_argument("--matcher-field-value", help="value of the matcher field") - - - class pluginHorreum: - def __init__(self, args): - self.logger = logging.getLogger("opl.showel.pluginHorreum") - self.args = args - - def upload(self): - if self.args.horreum_data_file is None: - raise Exception( - "Horreum data file is required to work with --horreum-upload" - ) - elif self.args.horreum_host is None: - raise Exception("Horreum host is required to work with --horreum-upload") - elif self.args.token is None: - raise Exception( - "Authorisation Token is required to work with --horreum-upload" - ) - elif self.args.job_name is None: - raise Exception("Job name is required to work with --horreum-upload") - elif self.args.test_start is None: - raise Exception("Test start is required to work with --horreum-upload") - elif self.args.test_end is None: - raise Exception("Test end is required to work with --horreum-upload") - else: - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {self.args.token}", - } - jsonFile = open(self.args.horreum_data_file, "r") - values = json.load(jsonFile) - jsonFile.close() - test_matcher = values[self.args.test_job_matcher] - response = requests.get( - f"{self.args.horreum_host}/api/test/byName/{self.args.test_name_horreum}", - headers=headers, - ) - test_id = json.load(response.text)["id"] - filter_data = {f"{self.args.test_job_matcher}": f"{test_matcher}"} - response = requests.get( - f"{self.args.horreum_host}/api/dataset/list/{test_id}", - headers=headers, - params={"filter": json.dumps(filter_data)}, - ) - datasets = response.json().get("datasets", []) - if len(datasets) > 0: - raise Exception( - f"Test result {self.args.test_job_matcher}={test_matcher} found in Horreum {datasets}, skipping upload" - ) - logging.info("INFO: Uploading to Horreum ... 
") - params = { - "test": self.args.test_name_horreum, - "start": self.args.test_start, - "stop": self.args.test_end, - "owner": self.args.test_owner, - "access": self.args.test_access, - } - requests.post( - f"{self.args.horreum_host}/api/run/data", - params=params, - headers=headers, - data=json.dumps(values), - ) - - def result(self): - if self.args.id_array is None: - raise Exception( - "Id array json file is needed to work with --horreum-result" - ) - elif self.args.horreum_data_file is None: - raise Exception( - "Horreum data file is required to work with --horreum-result" - ) - elif self.args.test_start is None: - raise Exception("Test start is required to work with --horreum-result") - elif self.args.test_end is None: - raise Exception("Test end is required to work with --horreum-result") - elif self.args.test_id is None: - raise Exception("Test id is required to work with --horreum-result") - else: - values = requests.get( - f"https://{self.args.horreum_host}/api/alerting/variables", - params={"test": self.args.test_id}, - ) - id_array = values.json() - is_fail = 0 - for i in id_array: - id_value = i["id"] - jsonFile = open(self.args.horreum_data_file, "r") - values = json.load(jsonFile) - jsonFile.close() - range_data = { - "range": { - "from": self.args.test_start, - "to": self.args.test_end, - "oneBeforeAndAfter": True, - }, - "annotation": {"query": id_value}, - } - - # Send a POST request to the API and retrieve the result using curl and jq - result = requests.get( - f"https://{self.args.horreum_host}/api/changes/annotations", - headers={"content-type: application/json"}, - data=json.dumps(range_data), - ) - - # Check if the result is not an empty list - if result != "[]": - is_fail = 1 - status_data.doit_set( - self.args.horreum_data_file, {"result": "FAIL"} - ) - break - if is_fail != 1: - status_data.doit_set(self.args.horreum_data_file, {"result": "PASS"}) - - @staticmethod - def set_args(parser, group_actions): - group_actions.add_argument( - "--horreum-upload", - dest="actions", - default=[], - action="append_const", - const=("horreum", "upload"), - help="Upload file to Horreum if not already there", - ) - group_actions.add_argument( - "--horreum-result", - dest="actions", - default=[], - action="append_const", - const=("horreum", "result"), - help="Get Horreum no-/change signal for a given time range", - ) - - group = parser.add_argument_group( - title="horreum", - description="Options needed to work with Horreum", - ) - group.add_argument("--horreum-data-file", help="Data file to upload to Horreum") - group.add_argument("--horreum-host", help="Horreum host url") - group.add_argument("--token", help="Authorisation token") - group.add_argument("--job-name", help="Job name") - group.add_argument( - "--test-name-horreum", default="load-tests-result", help="Test Name" - ) - group.add_argument("--test-job-matcher", default="jobName") - group.add_argument("--test-owner", default="rhtap-perf-test-team") - group.add_argument("--test-access", default="PUBLIC") - group.add_argument("--test-start", help="time when the test started") - group.add_argument("--test-end", help="time when the test ended") - group.add_argument( - "--test-id", help="Id of the test for which we want to check Pass or Fail" - ) - - -class pluginResultsDashboard: - def __init__(self, args): - self.logger = logging.getLogger("opl.showel.pluginResultsDashboard") - self.args = args - - def upload(self): - if self.args.status_data is None: - raise Exception( - "Status data file is mandatory to work with 
--results-dashboard-upload" - ) - elif self.args.es_host_url is None: - raise Exception( - "ES host url is required to work with --results-dashboard-upload" - ) - elif self.args.group_name is None: - raise Exception( - "Group Name is mandatory to work with --results-dashboard-upload" - ) - elif self.args.product_name is None: - raise Exception( - "Product Name is mandatory to work with --results-dashboard-upload" - ) - elif self.args.test_name is None: - raise Exception( - "Test Name is mandatory to work with --results-dashboard-upload" - ) - else: - jsonFile = open(self.args.status_data, "r") - values = json.load(jsonFile) - jsonFile.close() - date = values["timestamp"] - link = values["jobLink"] - result = values["result"] - result_id = values["metadata"]["env"]["BUILD_ID"] - json_data = json.dumps( - { - "query": { - "bool": { - "filter": [{"term": {"result_id.keyword": f"{result_id}"}}] - } - } - } - ) - headers = {"Content-Type": "application/json"} - current_doc_in_es = requests.get( - f"{self.args.es_host_url}/{self.args.dashboard_es_index}/_search", - headers=headers, - data=json_data, - ) - if json.loads(current_doc_in_es.text)["hits"]["total"]["value"] == 0: - logging.info("Uploading to results dashboard") - upload_data = json.dumps( - { - "date": date, - "group": self.args.group_name, - "link": link, - "product": self.args.product_name, - "release": self.args.release, - "result": result, - "result_id": result_id, - "test": self.args.test_name, - "version": self.args.version, - } - ) - requests.post( - f"{self.args.es_host_url}/{self.args.dashboard_es_index}/_doc", - headers=headers, - data=upload_data, - ) - else: - logging.info("INFO: Already in Results Dashboard ES, skipping upload") - - @staticmethod - def set_args(parser, group_actions): - group_actions.add_argument( - "--resultsdashboard-upload", - dest="actions", - default=[], - action="append_const", - const=("resultsdashboard", "upload"), - help="Upload file to Results Dashboard if not already there", - ) - group = parser.add_argument_group( - title="resultsdashboard", - description="Options needed to work with Results Dashboard", - ) - group.add_argument( - "--es-host-url", - help="Elastic search host url", - ) - group.add_argument( - "--dashboard-es-index", - default="results-dashboard-data", - help="Elastic search index where the result is stored", - ) - group.add_argument( - "--status-data", - help="File where we maintain metadata, results, parameters and measurements for this test run", - ) - group.add_argument( - "--group-name", help="Name of the group where the product belongs" - ) - group.add_argument("--product-name", help="Name of the Product") - group.add_argument( - "--release", - default="latest", - help="Type of release of Product for e.g latest,nightly,weekly", - ) - group.add_argument("--test-name", help="Name of the CPT test") - group.add_argument( - "--version", - default="1", - help="Version of the product on which the test ran", - ) - - -PLUGINS = { - "prow": pluginProw, - "opensearch": pluginOpenSearch, - "horreum": pluginHorreum, - "resultsdashboard": pluginResultsDashboard, -} - - -def main(): - parser = argparse.ArgumentParser( - description="Shovel data from A to B", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - group_actions = parser.add_argument_group( - title="actions", - description="Various high level things you can do", - ) - for name, plugin in PLUGINS.items(): - plugin.set_args(parser, group_actions) - - with skelet.test_setup(parser) as (args, status_data): - logger = 
logging.getLogger("main") - for plugin_name, function_name in args.actions: - logger.info( - f"Instantiating plugin {plugin_name} for function {function_name}" - ) - plugin_object = PLUGINS[plugin_name] - plugin_instance = plugin_object(args) - getattr(plugin_instance, function_name)()