Skip to content

Commit 4d85185

Browse files
committed
Merge branch 'feature/PI-506-upgrade_lambdas' into release/2024-09-13
2 parents 10d9fed + b887849 commit 4d85185

File tree

13 files changed

+5281
-58
lines changed

13 files changed

+5281
-58
lines changed
Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1-
account_name = "prod"
2-
environment = "prod"
3-
domain = "api.cpm.national.nhs.uk"
1+
account_name = "prod"
2+
environment = "prod"
3+
domain = "api.cpm.national.nhs.uk"
4+
lambda_memory_size = 1536
Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1-
account_name = "qa"
2-
environment = "qa"
3-
domain = "api.cpm.qa.national.nhs.uk"
1+
account_name = "qa"
2+
environment = "qa"
3+
domain = "api.cpm.qa.national.nhs.uk"
4+
lambda_memory_size = 1536

infrastructure/terraform/per_workspace/main.tf

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -112,6 +112,7 @@ module "lambdas" {
112112
resources = local.permission_resource_map[replace(file, ".json", "")]
113113
}
114114
}
115+
memory_size = var.lambda_memory_size
115116
}
116117

117118
module "authoriser" {

infrastructure/terraform/per_workspace/modules/api_worker/api_lambda/lambda.tf

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ module "lambda_function" {
77
handler = "api.${var.name}.index.handler"
88
runtime = var.python_version
99
timeout = 10
10+
memory_size = var.memory_size
1011

1112
timeouts = {
1213
create = "5m"

infrastructure/terraform/per_workspace/modules/api_worker/api_lambda/vars.tf

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -40,3 +40,7 @@ variable "attach_policy_statements" {
4040
variable "policy_statements" {
4141
default = {}
4242
}
43+
44+
variable "memory_size" {
45+
default = 128
46+
}

infrastructure/terraform/per_workspace/vars.tf

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,3 +65,7 @@ variable "python_version" {
6565
variable "domain" {
6666
type = string
6767
}
68+
69+
variable "lambda_memory_size" {
70+
default = 128
71+
}

scripts/test/test.mk

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ SDS_DEV_APIKEY =
66
USE_CPM_PROD ?= FALSE
77
TEST_COUNT =
88
COMPARISON_ENV ?= local
9+
RUN_SPEEDTEST ?= FALSE
910

1011
_pytest:
1112
AWS_DEFAULT_REGION=$(AWS_DEFAULT_REGION) AWS_ACCESS_KEY_ID=$(AWS_ACCESS_KEY_ID) AWS_SECRET_ACCESS_KEY=$(AWS_SECRET_ACCESS_KEY) AWS_SESSION_TOKEN=$(AWS_SESSION_TOKEN) poetry run python -m pytest $(PYTEST_FLAGS) $(_INTERNAL_FLAGS) $(_CACHE_CLEAR)
@@ -40,4 +41,4 @@ test--feature--%--auto-retry: ## Autoretry of failed feature (gherkin) tests
4041
$(MAKE) test--feature--$* _INTERNAL_FLAGS="--define='auto_retry=true'"
4142

4243
test--sds--matrix: ## Run end-to-end smoke tests that check data matches between cpm and ldap
43-
SDS_PROD_APIKEY=$(SDS_PROD_APIKEY) SDS_DEV_APIKEY=$(SDS_DEV_APIKEY) USE_CPM_PROD=$(USE_CPM_PROD) TEST_COUNT=$(TEST_COUNT) COMPARISON_ENV=$(COMPARISON_ENV) poetry run python -m pytest $(PYTEST_FLAGS) -m 'matrix' --ignore=src/layers --ignore=src/etl $(_CACHE_CLEAR)
44+
SDS_PROD_APIKEY=$(SDS_PROD_APIKEY) SDS_DEV_APIKEY=$(SDS_DEV_APIKEY) USE_CPM_PROD=$(USE_CPM_PROD) TEST_COUNT=$(TEST_COUNT) COMPARISON_ENV=$(COMPARISON_ENV) RUN_SPEEDTEST=$(RUN_SPEEDTEST) poetry run python -m pytest $(PYTEST_FLAGS) -m 'matrix' --ignore=src/layers --ignore=src/etl $(_CACHE_CLEAR)
Lines changed: 133 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,133 @@
1+
import ast
2+
import glob
3+
import json
4+
import math
5+
import os
6+
import statistics
7+
8+
from event.json import json_loads
9+
10+
11+
def preprocess_json_file(file_path):
    """Read a file of comma-separated JSON objects and parse it as a list.

    The test run appends objects as ``{...},{...},`` (with a trailing
    comma), so the raw file content is not valid JSON on its own.
    Wrapping the content in brackets and dropping the trailing comma
    turns it into a parseable JSON array.

    Raises the underlying ``json.JSONDecodeError`` (after logging the
    offending file) when the content still cannot be parsed.
    """
    with open(file_path, "r") as f:
        content = f.read().strip()

    # Add brackets around the content to form a valid JSON array
    content = f"[{content}]"

    # Remove any trailing commas (`,]` is not valid JSON).
    # NOTE: a previous `content.replace("},{", "},{")` call was a no-op
    # (identical search and replacement strings) and has been removed.
    content = content.replace(",]", "]")

    try:
        data = json_loads(content)
    except json.JSONDecodeError as e:
        print(f"Error parsing JSON in file {file_path}: {e}")  # noqa: T201
        raise
    return data
29+
30+
31+
def transform_params(params_str):
    """Parse a stringified params dict and prefix each key for reporting.

    Each key becomes ``request.params.<key>``; the internal ``use_cpm``
    toggle is excluded.  Returns an empty dict (after logging) when the
    string cannot be evaluated as a Python literal.
    """
    try:
        parsed = ast.literal_eval(params_str)
    except (ValueError, SyntaxError) as e:
        print(f"Error parsing string to dictionary: {e}")  # noqa: T201
        return {}

    prefixed = {}
    for key, value in parsed.items():
        if key == "use_cpm":  # internal toggle, not a real query param
            continue
        prefixed[f"request.params.{key}"] = value
    return prefixed
47+
48+
49+
def extract_response_times(json_files):
    """Collect LDAP/CPM response times and request params from result files.

    Returns three parallel lists: the ldap response times, the cpm
    response times, and — for each entry — its transformed params dict
    with the request path added under the ``"path"`` key.
    """
    ldap_times, cpm_times, params = [], [], []

    for path in json_files:
        for entry in preprocess_json_file(path):
            ldap_times.append(entry["ldap_response_time"])
            cpm_times.append(entry["cpm_response_time"])
            record = transform_params(entry["params"])
            record["path"] = entry["path"]
            params.append(record)

    return ldap_times, cpm_times, params
64+
65+
66+
def format_value(value):
    """Render a millisecond value as a two-decimal string, e.g. '12.34 ms'."""
    return "{:.2f} ms".format(value)
68+
69+
70+
def calculate_statistics(times_list):
    """Summarise a list of response times (in ms) as formatted strings.

    Returns a dict with mean, mean of values under one second, mode,
    lowest, highest and median.  An empty input yields zeroed
    placeholders so callers never have to special-case it.

    NOTE: a previously computed ``geometric_mean`` was never used and
    would have raised ``ValueError`` (``math.log``) on a 0 ms time; the
    dead computation has been removed.
    """
    if not times_list:
        return {
            "mean": "0.00 ms",
            "mean_under_1s": "0.00 ms",
            "mode": "N/A",
            "lowest": "0.00 ms",
            "highest": "0.00 ms",
            "median": "0.00 ms",
        }

    try:
        mode_value = statistics.mode(times_list)
    except statistics.StatisticsError:
        mode_value = "N/A"  # no unique mode (Python < 3.8 behaviour)

    # The mean of times under 1000ms is reported separately so that a
    # handful of very slow outliers does not skew the headline figure.
    times_under_1s = [t for t in times_list if t < 1000]
    mean_under_1s = sum(times_under_1s) / len(times_under_1s) if times_under_1s else 0

    return {
        "mean": format_value(sum(times_list) / len(times_list)),
        "mean_under_1s": format_value(mean_under_1s),
        "mode": format_value(mode_value) if mode_value != "N/A" else mode_value,
        "lowest": format_value(min(times_list)),
        "highest": format_value(max(times_list)),
        "median": format_value(statistics.median(times_list)),
    }
101+
102+
103+
def write_to_json_file(output_file_path, data_list):
    """Dump ``data_list`` to ``output_file_path`` as indented JSON.

    Existing files are never overwritten: if the path already exists a
    notice is printed and nothing is written.
    """
    # Never clobber an existing results file.
    if not os.path.exists(output_file_path):
        with open(output_file_path, "w") as out:
            json.dump(data_list, out, indent=4)
        return

    print(  # noqa: T201
        f"The file '{output_file_path}' already exists. No action will be taken."  # noqa: T201
    )  # noqa: T201
114+
115+
116+
# Gather every speed-test result file produced by the test run.
json_files = glob.glob("src/api/tests/sds_data_tests/test_success_*.json")

output_file_path = (
    "src/api/tests/sds_data_tests/data/sds_fhir_api.speed_test_queries.device.json"
)

# Pull the raw timings and the query parameters out of the result files.
ldap_times, cpm_times, params = extract_response_times(json_files)

# Summarise both back ends and report to stdout.
ldap_stats = calculate_statistics(ldap_times)
cpm_stats = calculate_statistics(cpm_times)

print(f"LDAP Response Time Stats: {ldap_stats}")  # noqa: T201
print(f"CPM Response Time Stats: {cpm_stats}")  # noqa: T201

# Persist the parameter list for later replay (no-op if the file exists).
write_to_json_file(output_file_path, params)

src/api/tests/sds_data_tests/conftest.py

Lines changed: 13 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,22 @@
11
import json
2+
import subprocess
23

34
import pytest
45

56
current_file_index = 0
67
entry_count = 0
78
max_entries_per_file = 2000
8-
file_name_template = "test_success_{}.json"
9+
file_name_template = "src/api/tests/sds_data_tests/test_success_{}.json"
10+
11+
12+
@pytest.fixture(scope="session", autouse=True)
def run_after_tests():
    """Session-wide autouse fixture: post-process results after all tests.

    Yields immediately so the entire test session runs first, then runs
    the speed-test calculation script.  Uses ``sys.executable`` rather
    than a bare ``"python"`` so the script runs under the same
    interpreter (and virtualenv) as the test session; ``check=True``
    surfaces any failure of the script.
    """
    yield
    import sys  # local import keeps this change self-contained to the fixture

    subprocess.run(
        [sys.executable, "src/api/tests/sds_data_tests/calculation.py"], check=True
    )
920

1021

1122
def get_current_file():
@@ -41,7 +52,7 @@ def pytest_runtest_logreport(report):
4152
str(report.longreprtext)
4253
)
4354
output = {"failed_request": failed_request, "error": assertion_error}
44-
with open("test_failure.json", "a") as f:
55+
with open("src/api/tests/sds_data_tests/test_failure.json", "a") as f:
4556
f.write(json.dumps(output))
4657
f.write(",")
4758
if report.when == "call" and report.passed and hasattr(pytest, "success_message"):

0 commit comments

Comments
 (0)