Applying Filters to the Telco Table #157

Open · wants to merge 13 commits into base: revamp
8 changes: 8 additions & 0 deletions backend/app/api/v1/commons/constants.py
@@ -96,3 +96,11 @@
"startDate": "Start Date",
"endDate": "End Date",
}

FIELDS_FILTER_DICT = {
"nodeName": "node_name",
"cpu": "cpu",
"benchmark": "test_type",
"ocpVersion": "ocp_version",
"releaseStream": "ocp_build",
}
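
Note: FIELDS_FILTER_DICT translates the API-facing filter keys used by the frontend into the Splunk field names stored with each telco run. A tiny illustration (nothing here beyond the dictionary added above):

```python
from app.api.v1.commons.constants import FIELDS_FILTER_DICT

print(FIELDS_FILTER_DICT["benchmark"])      # "test_type"
print(FIELDS_FILTER_DICT["releaseStream"])  # "ocp_build"
```
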
3 changes: 2 additions & 1 deletion backend/app/api/v1/commons/ocp.py
@@ -2,6 +2,7 @@
import pandas as pd
import app.api.v1.commons.utils as utils
from app.services.search import ElasticService
from app.api.v1.commons.constants import OCP_FIELD_CONSTANT_DICT


async def getData(
@@ -79,7 +80,7 @@ def fillEncryptionType(row):
async def getFilterData(start_datetime: date, end_datetime: date, configpath: str):
es = ElasticService(configpath=configpath)

aggregate = utils.buildAggregateQuery("OCP_FIELD_CONSTANT_DICT")
aggregate = utils.buildAggregateQuery(OCP_FIELD_CONSTANT_DICT)

response = await es.filterPost(start_datetime, end_datetime, aggregate)
await es.close()
3 changes: 2 additions & 1 deletion backend/app/api/v1/commons/quay.py
@@ -2,6 +2,7 @@
import pandas as pd
import app.api.v1.commons.utils as utils
from app.services.search import ElasticService
from app.api.v1.commons.constants import QUAY_FIELD_CONSTANT_DICT


async def getData(
@@ -54,7 +55,7 @@ async def getFilterData(start_datetime: date, end_datetime: date, configpath: st

es = ElasticService(configpath=configpath)

aggregate = utils.buildAggregateQuery("QUAY_FIELD_CONSTANT_DICT")
aggregate = utils.buildAggregateQuery(QUAY_FIELD_CONSTANT_DICT)

response = await es.filterPost(start_datetime, end_datetime, aggregate)
await es.close()
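
Note: the ocp.py and quay.py hunks are the same one-line fix. buildAggregateQuery was previously handed the literal string "OCP_FIELD_CONSTANT_DICT" / "QUAY_FIELD_CONSTANT_DICT" instead of the dictionary that name refers to, so the aggregation builder never saw the real field mappings. A minimal sketch of the failure mode, assuming the builder iterates its argument (the actual buildAggregateQuery in utils.py may differ):

```python
# Hypothetical sketch, not the project's buildAggregateQuery.
def build_aggregate_query_sketch(field_dict):
    # One terms aggregation per field name in the mapping.
    return {name: {"terms": {"field": name}} for name in field_dict}

build_aggregate_query_sketch({"ocpVersion": "OCP Version"})
# -> {"ocpVersion": {"terms": {"field": "ocpVersion"}}}

build_aggregate_query_sketch("OCP_FIELD_CONSTANT_DICT")
# iterates the string character by character, producing one bogus bucket per letter
```
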
146 changes: 81 additions & 65 deletions backend/app/api/v1/commons/telco.py
@@ -10,17 +10,13 @@


async def getData(
start_datetime: date, end_datetime: date, size: int, offset: int, configpath: str
start_datetime: date,
end_datetime: date,
size: int,
offset: int,
filter: str,
configpath: str,
):
test_types = [
"oslat",
"cyclictest",
"cpu_util",
"deployment",
"ptp",
"reboot",
"rfc-2544",
]
cfg = config.get_config()
try:
jenkins_url = cfg.get("telco.config.job_url")
@@ -40,67 +36,61 @@ async def getData(
"latest_time": "{}T23:59:59".format(end_datetime.strftime("%Y-%m-%d")),
"output_mode": "json",
}
searchList = " OR ".join(
['test_type="{}"'.format(test_type) for test_type in test_types]
)
searchList = constructFilterQuery(filter)

splunk = SplunkService(configpath=configpath)
response = await splunk.query(
query=query, size=size, offset=offset, searchList=searchList
)
mapped_list = []
for each_response in response["data"]:
end_timestamp = int(each_response["timestamp"])
test_data = each_response["data"]
threshold = await telcoGraphs.process_json(test_data, True)
hash_digest, encrypted_data = hasher.hash_encrypt_json(each_response)
execution_time_seconds = test_type_execution_times.get(
test_data["test_type"], 0
)
start_timestamp = end_timestamp - execution_time_seconds
start_time_utc = datetime.fromtimestamp(start_timestamp, tz=timezone.utc)
end_time_utc = datetime.fromtimestamp(end_timestamp, tz=timezone.utc)
kernel = test_data["kernel"] if "kernel" in test_data else "Undefined"

mapped_list.append(
{
"uuid": hash_digest,
"encryptedData": encrypted_data.decode("utf-8"),
"ciSystem": "Jenkins",
"benchmark": test_data["test_type"],
"kernel": kernel,
"shortVersion": test_data["ocp_version"],
"ocpVersion": test_data["ocp_build"],
"releaseStream": utils.getReleaseStream(
{"releaseStream": test_data["ocp_build"]}
),
"nodeName": test_data["node_name"],
"cpu": test_data["cpu"],
"formal": test_data["formal"],
"startDate": str(start_time_utc),
"endDate": str(end_time_utc),
"buildUrl": jenkins_url
+ "/"
+ str(test_data["cluster_artifacts"]["ref"]["jenkins_build"]),
"jobStatus": "failure" if (threshold != 0) else "success",
"jobDuration": execution_time_seconds,
}
)
if response:
for each_response in response["data"]:
end_timestamp = int(each_response["timestamp"])
test_data = each_response["data"]
threshold = await telcoGraphs.process_json(test_data, True)
hash_digest, encrypted_data = hasher.hash_encrypt_json(each_response)
execution_time_seconds = test_type_execution_times.get(
test_data["test_type"], 0
)
start_timestamp = end_timestamp - execution_time_seconds
start_time_utc = datetime.fromtimestamp(start_timestamp, tz=timezone.utc)
end_time_utc = datetime.fromtimestamp(end_timestamp, tz=timezone.utc)
kernel = test_data["kernel"] if "kernel" in test_data else "Undefined"

mapped_list.append(
{
"uuid": hash_digest,
"encryptedData": encrypted_data.decode("utf-8"),
"ciSystem": "Jenkins",
"benchmark": test_data["test_type"],
"kernel": kernel,
"shortVersion": test_data["ocp_version"],
"ocpVersion": test_data["ocp_build"],
"releaseStream": utils.getReleaseStream(
{"releaseStream": test_data["ocp_build"]}
),
"nodeName": test_data["node_name"],
"cpu": test_data["cpu"],
"formal": test_data["formal"],
"startDate": str(start_time_utc),
"endDate": str(end_time_utc),
"buildUrl": jenkins_url
+ "/"
+ str(test_data["cluster_artifacts"]["ref"]["jenkins_build"]),
"jobStatus": "failure" if (threshold != 0) else "success",
"jobDuration": execution_time_seconds,
}
)

jobs = pd.json_normalize(mapped_list)

return {"data": jobs, "total": response["total"]}
return {"data": jobs, "total": response["total"] if response else 0}


async def getFilterData(start_datetime: date, end_datetime: date, configpath: str):
test_types = [
"oslat",
"cyclictest",
"cpu_util",
"deployment",
"ptp",
"reboot",
"rfc-2544",
]
async def getFilterData(
start_datetime: date, end_datetime: date, filter: str, configpath: str
):

cfg = config.get_config()
try:
jenkins_url = cfg.get("telco.config.job_url")
@@ -112,13 +102,11 @@ async def getFilterData(start_datetime: date, end_datetime: date, configpath: st
"latest_time": "{}T23:59:59".format(end_datetime.strftime("%Y-%m-%d")),
"output_mode": "json",
}
searchList = " OR ".join(
['test_type="{}"'.format(test_type) for test_type in test_types]
)
searchList = constructFilterQuery(filter)

splunk = SplunkService(configpath=configpath)
response = await splunk.filterPost(query=query, searchList=searchList)
filterData = []
print(response["data"])
if len(response["data"]) > 0:
for item in response["data"]:
for field, value in item.items():
@@ -155,3 +143,31 @@ async def getFilterData(start_datetime: date, end_datetime: date, configpath: st
{"key": "jobStatus", "value": ["success", "failure"], "name": "Status"}
)
return {"data": filterData, "total": response["total"]}


def constructFilterQuery(filter):
test_types = [
"oslat",
"cyclictest",
"cpu_util",
"deployment",
"ptp",
"reboot",
"rfc-2544",
]
test_type_filter = " OR ".join(
f'test_type="{test_type}"' for test_type in test_types
)
search_list = test_type_filter

if filter:
filter_dict = utils.get_dict_from_qs(filter)
search_query = utils.construct_query(filter_dict)

# Update `search_list` based on the presence of "benchmark" in `filter_dict`
search_list = (
search_query
if "benchmark" in filter_dict
else f"{search_query} {test_type_filter}"
)
return search_list
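
Note: given the helpers added to utils.py below (get_dict_from_qs and construct_query), constructFilterQuery produces Splunk search strings along these lines (filter values are made-up examples):

```python
constructFilterQuery(None)
# -> 'test_type="oslat" OR test_type="cyclictest" OR ... OR test_type="rfc-2544"'

constructFilterQuery("benchmark='oslat'&benchmark='ptp'")
# "benchmark" is present, so only the mapped clause is used:
# -> 'test_type="oslat" OR test_type="ptp"'

constructFilterQuery("nodeName='worker-0'")
# no "benchmark", so the default test_type clause is appended after a space:
# -> 'node_name="worker-0" test_type="oslat" OR test_type="cyclictest" OR ...'
```

Whether Splunk applies the intended precedence to that last space-joined form may be worth double-checking.
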
23 changes: 23 additions & 0 deletions backend/app/api/v1/commons/utils.py
@@ -4,6 +4,7 @@
import re
import app.api.v1.commons.constants as constants
from typing import Optional
from urllib.parse import parse_qs


async def getMetadata(uuid: str, configpath: str):
@@ -133,3 +134,25 @@ def buildReleaseStreamFilter(input_array):
)
mapped_array.append(match)
return list(set(mapped_array))


def get_dict_from_qs(query_string):
query_dict = parse_qs(query_string)
cleaned_dict = {
key: [v.strip("'") for v in values] for key, values in query_dict.items()
}

return cleaned_dict


def construct_query(filter_dict):
query_parts = []
if isinstance(filter_dict, dict):
for key, values in filter_dict.items():
k = constants.FIELDS_FILTER_DICT[key]
if len(values) > 1:
or_clause = " OR ".join([f'{k}="{value}"' for value in values])
query_parts.append(f"{or_clause}")
Collaborator comment: This is really just query_parts.append(or_clause) 😆

Collaborator comment: You didn't change this? On the one hand, it won't break anything, but it's an awkward usage that'll unnecessarily draw the attention of a reader away from the code flow. 😆

else:
query_parts.append(f'{k}="{values[0]}"')
return " ".join(query_parts)
2 changes: 1 addition & 1 deletion backend/app/api/v1/endpoints/cpt/maps/hce.py
@@ -1,4 +1,4 @@
from .app.api.v1.commons.hce import getData
from app.api.v1.commons.hce import getData
from datetime import date
import pandas as pd

2 changes: 1 addition & 1 deletion backend/app/api/v1/endpoints/cpt/maps/ocm.py
@@ -1,4 +1,4 @@
from .app.api.v1.commons.ocm import getData
from app.api.v1.commons.ocm import getData
from datetime import date
import pandas as pd

4 changes: 2 additions & 2 deletions backend/app/api/v1/endpoints/cpt/maps/ocp.py
@@ -1,5 +1,5 @@
from .app.api.v1.commons.ocp import getData
from .app.api.v1.commons.utils import getReleaseStream
from app.api.v1.commons.ocp import getData
from app.api.v1.commons.utils import getReleaseStream
from datetime import date
import pandas as pd

2 changes: 1 addition & 1 deletion backend/app/api/v1/endpoints/cpt/maps/quay.py
@@ -1,4 +1,4 @@
from .app.api.v1.commons.quay import getData
from app.api.v1.commons.quay import getData
from datetime import date
import pandas as pd

4 changes: 2 additions & 2 deletions backend/app/api/v1/endpoints/cpt/maps/telco.py
@@ -1,5 +1,5 @@
from .app.api.v1.commons.telco import getData
from .app.api.v1.commons.utils import getReleaseStream
from app.api.v1.commons.telco import getData
from app.api.v1.commons.utils import getReleaseStream
from datetime import date
import pandas as pd

22 changes: 11 additions & 11 deletions backend/app/api/v1/endpoints/telco/telcoGraphs.py
@@ -28,7 +28,7 @@ async def process_json(json_data: dict, is_row: bool):

def process_ptp(json_data: str, is_row: bool):
nic = json_data["nic"]
ptp4l_max_offset = json_data.get("ptp4l_max_offset", 0)
ptp4l_max_offset = json_data.get("ptp4l_max_offset") or 0
if "mellanox" in nic.lower():
defined_offset_threshold = 200
else:
@@ -87,9 +87,9 @@ def process_reboot(json_data: str, is_row: bool):
defined_threshold = 20
reboot_type = json_data["reboot_type"]
for each_iteration in json_data["Iterations"]:
max_minutes = max(max_minutes, each_iteration.get("total_minutes", 0))
avg_minutes += each_iteration.get("total_minutes", 0)
avg_minutes /= len(json_data["Iterations"])
max_minutes = max(max_minutes, each_iteration.get("total_minutes") or 0)
avg_minutes += each_iteration.get("total_minutes") or 0
avg_minutes /= max(len(json_data["Iterations"]), 1)
if max_minutes > defined_threshold:
minus_max_minutes = max_minutes - defined_threshold
if avg_minutes > defined_threshold:
@@ -150,9 +150,9 @@ def process_cpu_util(json_data: str, is_row: bool):
if each_scenario["scenario_name"] == "steadyworkload":
for each_type in each_scenario["types"]:
if each_type["type_name"] == "total":
total_max_cpu = each_type.get("max_cpu", 0)
total_max_cpu = each_type.get("max_cpu") or 0
break
total_avg_cpu = each_scenario.get("avg_cpu_total", 0)
total_avg_cpu = each_scenario.get("avg_cpu_total") or 0
break
if total_max_cpu > defined_threshold:
minus_max_cpu = total_max_cpu - defined_threshold
@@ -199,7 +199,7 @@ def process_cpu_util(json_data: str, is_row: bool):


def process_rfc_2544(json_data: str, is_row: bool):
max_delay = json_data.get("max_delay", 0)
max_delay = json_data.get("max_delay") or 0
defined_delay_threshold = 30.0
minus_max_delay = 0.0
if max_delay > defined_delay_threshold:
@@ -259,8 +259,8 @@ def process_cyclictest(json_data: str, is_row: bool):


def process_deployment(json_data: str, is_row: bool):
total_minutes = json_data.get("total_minutes", 0)
reboot_count = json_data.get("reboot_count", 0)
total_minutes = json_data.get("total_minutes") or 0
reboot_count = json_data.get("reboot_count") or 0
defined_total_minutes_threshold = 180
defined_total_reboot_count = 3
minus_total_minutes = 0.0
@@ -358,9 +358,9 @@ def get_oslat_or_cyclictest(json_data: str, is_row: bool):
defined_latency_threshold = 20
defined_number_of_nines_threshold = 100
for each_test_unit in json_data["test_units"]:
max_latency = max(max_latency, each_test_unit.get("max_latency", 0))
max_latency = max(max_latency, each_test_unit.get("max_latency") or 0)
min_number_of_nines = min(
min_number_of_nines, each_test_unit.get("number_of_nines", 0)
min_number_of_nines, each_test_unit.get("number_of_nines") or 0
)
if max_latency > defined_latency_threshold:
minus_max_latency = max_latency - defined_latency_threshold
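
Note: the repeated .get(key, 0) → .get(key) or 0 changes guard against fields that are present but explicitly null in the Splunk payload: dict.get only falls back to its default when the key is missing, so a stored None previously flowed into the max()/threshold comparisons and raised a TypeError. The new max(len(...), 1) divisor likewise avoids a ZeroDivisionError for an empty Iterations list. A minimal illustration (made-up data):

```python
row = {"max_delay": None}

row.get("max_delay", 0)    # None: the key exists, so the default is never used
row.get("max_delay") or 0  # 0: falsy values are coerced to the fallback

iterations = []
avg = sum(i.get("total_minutes") or 0 for i in iterations) / max(len(iterations), 1)  # 0.0
```
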
6 changes: 4 additions & 2 deletions backend/app/api/v1/endpoints/telco/telcoJobs.py
@@ -36,6 +36,7 @@ async def jobs(
pretty: bool = Query(False, description="Output content in pretty format."),
size: int = Query(None, description="Number of jobs to fetch"),
offset: int = Query(None, description="Offset Number to fetch jobs from"),
filter: str = Query(None, description="Query to filter the jobs"),
):
if start_date is None:
start_date = datetime.utcnow().date()
@@ -53,7 +54,7 @@
)
offset, size = normalize_pagination(offset, size)

results = await getData(start_date, end_date, size, offset, "telco.splunk")
results = await getData(start_date, end_date, size, offset, filter, "telco.splunk")

jobs = []
if "data" in results and len(results["data"]) >= 1:
@@ -100,6 +101,7 @@ async def filters(
pretty: bool = Query(False, description="Output content in pretty format."),
size: int = Query(None, description="Number of jobs to fetch"),
offset: int = Query(None, description="Offset Number to fetch jobs from"),
filter: str = Query(None, description="Query to filter the jobs"),
):
if start_date is None:
start_date = datetime.utcnow().date()
@@ -116,7 +118,7 @@
status_code=422,
)

results = await getFilterData(start_date, end_date, "telco.splunk")
results = await getFilterData(start_date, end_date, filter, "telco.splunk")

response = {"filterData": results["data"], "summary": {"total": results["total"]}}
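
Note: with the new filter query parameter wired through both endpoints, a client can pass the same query-string-style filter the frontend builds. A hypothetical call (the base URL and route prefix are assumptions, not taken from this PR):

```python
import httpx

params = {
    "start_date": "2024-07-01",
    "end_date": "2024-07-31",
    "size": 25,
    "offset": 0,
    "filter": "benchmark='oslat'&nodeName='worker-0'",
}
# Route prefix is assumed; adjust to wherever the telco jobs router is mounted.
resp = httpx.get("http://localhost:8000/api/v1/telco/jobs", params=params)
print(resp.json())
```
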
