|
| 1 | +import ast |
| 2 | +import glob |
| 3 | +import json |
| 4 | +import math |
| 5 | +import os |
| 6 | +import statistics |
| 7 | + |
| 8 | +from event.json import json_loads |
| 9 | + |
| 10 | + |
def preprocess_json_file(file_path):
    """Read a file of comma-separated JSON objects and parse it as a list.

    The speed-test output files contain bare JSON objects separated by
    commas (no enclosing array), so the raw content is wrapped in
    brackets and any trailing comma is stripped before parsing.

    Args:
        file_path: Path to the raw speed-test results file.

    Returns:
        The parsed list of result entries.

    Raises:
        json.JSONDecodeError: Re-raised (after logging the file path)
            when the preprocessed content is still not valid JSON.
    """
    with open(file_path, "r") as f:
        content = f.read().strip()

    # Add brackets around the content to form a valid JSON array.
    content = f"[{content}]"

    # Remove any trailing comma before the closing bracket (`,]` is not
    # valid JSON). NOTE(review): a previous `content.replace("},{", "},{")`
    # call was a no-op (identical arguments) and has been removed.
    content = content.replace(",]", "]")

    try:
        data = json_loads(content)
    except json.JSONDecodeError as e:
        print(f"Error parsing JSON in file {file_path}: {e}")  # noqa: T201
        raise
    return data
| 29 | + |
| 30 | + |
def transform_params(params_str):
    """Parse a stringified params dict and namespace its keys.

    Args:
        params_str: String representation of a Python dict literal,
            e.g. "{'organization': 'X', 'use_cpm': True}".

    Returns:
        A dict with each key prefixed as "request.params.<key>" and the
        "use_cpm" key dropped, or {} when the string cannot be parsed
        into a dict.
    """
    # Convert the string representation of the dictionary to an actual dictionary
    try:
        params_dict = ast.literal_eval(params_str)
    except (ValueError, SyntaxError) as e:
        print(f"Error parsing string to dictionary: {e}")  # noqa: T201
        return {}

    # Guard against literals that parse but are not dicts (e.g. "[1, 2]"),
    # which would otherwise crash on .items() with an AttributeError.
    if not isinstance(params_dict, dict):
        print(f"Error parsing string to dictionary: not a dict: {params_str!r}")  # noqa: T201
        return {}

    # Transform the dictionary, excluding the "use_cpm" toggle.
    return {
        f"request.params.{key}": value
        for key, value in params_dict.items()
        if key != "use_cpm"
    }
| 47 | + |
| 48 | + |
def extract_response_times(json_files):
    """Collect LDAP/CPM timings and request params from result files.

    Args:
        json_files: Iterable of result-file paths.

    Returns:
        Tuple of (ldap_times, cpm_times, params): two lists of response
        times and a list of transformed param dicts, each annotated
        with the request path under the "path" key.
    """
    ldap_times, cpm_times, params = [], [], []

    for json_file in json_files:
        for entry in preprocess_json_file(json_file):
            ldap_times.append(entry["ldap_response_time"])
            cpm_times.append(entry["cpm_response_time"])
            entry_params = transform_params(entry["params"])
            entry_params["path"] = entry["path"]
            params.append(entry_params)

    return ldap_times, cpm_times, params
| 64 | + |
| 65 | + |
def format_value(value):
    """Render a numeric duration as a millisecond string, e.g. "12.34 ms"."""
    return "{:.2f} ms".format(value)
| 68 | + |
| 69 | + |
def calculate_statistics(times_list):
    """Summarise a list of response times (ms) as formatted statistics.

    Args:
        times_list: List of response times in milliseconds.

    Returns:
        Dict with formatted "mean", "mean_under_1s", "mode", "lowest",
        "highest" and "median" values; zero/"N/A" placeholders for an
        empty list.
    """

    def _fmt(value):
        # Two decimal places with an " ms" suffix, matching format_value().
        return f"{value:.2f} ms"

    if not times_list:
        return {
            "mean": "0.00 ms",
            "mean_under_1s": "0.00 ms",
            "mode": "N/A",
            "lowest": "0.00 ms",
            "highest": "0.00 ms",
            "median": "0.00 ms",
        }

    try:
        mode_value = statistics.mode(times_list)
    except statistics.StatisticsError:
        mode_value = "N/A"  # No unique mode found (raised on Python < 3.8)

    # NOTE(review): an unused geometric-mean computation was removed here;
    # it raised ValueError (math.log of a non-positive value) for any
    # 0 ms response time and its result was never returned.

    # Mean of sub-second responses only, to exclude slow outliers.
    times_under_1s = [time for time in times_list if time < 1000]
    mean_under_1s = sum(times_under_1s) / len(times_under_1s) if times_under_1s else 0

    return {
        "mean": _fmt(sum(times_list) / len(times_list)),
        "mean_under_1s": _fmt(mean_under_1s),
        "mode": _fmt(mode_value) if mode_value != "N/A" else mode_value,
        "lowest": _fmt(min(times_list)),
        "highest": _fmt(max(times_list)),
        "median": _fmt(statistics.median(times_list)),
    }
| 101 | + |
| 102 | + |
def write_to_json_file(output_file_path, data_list):
    """Dump data_list as indented JSON to output_file_path, unless it exists.

    An existing file is left untouched so previously captured query
    data is never overwritten.
    """
    # Refuse to clobber an existing file.
    if os.path.exists(output_file_path):
        print(  # noqa: T201
            f"The file '{output_file_path}' already exists. No action will be taken."
        )
        return

    with open(output_file_path, "w") as out_file:
        json.dump(data_list, out_file, indent=4)
| 114 | + |
| 115 | + |
# Collect every speed-test success file produced by the SDS data tests.
json_files = glob.glob("src/api/tests/sds_data_tests/test_success_*.json")

output_file_path = (
    "src/api/tests/sds_data_tests/data/sds_fhir_api.speed_test_queries.device.json"
)

# Pull the raw timings and request params out of the result files.
ldap_times, cpm_times, params = extract_response_times(json_files)

# Summarise and report both backends' response times.
ldap_stats = calculate_statistics(ldap_times)
cpm_stats = calculate_statistics(cpm_times)
print(f"LDAP Response Time Stats: {ldap_stats}")  # noqa: T201
print(f"CPM Response Time Stats: {cpm_stats}")  # noqa: T201

# Persist the extracted query params for reuse as test fixture data.
write_to_json_file(output_file_path, params)
0 commit comments