
Commit ef6444d

test(cache): add cache miss test cases for safety checks
1 parent 7d6a012 commit ef6444d

File tree

2 files changed: +85 -0 lines changed


tests/test_content_safety_cache.py

Lines changed: 43 additions & 0 deletions
@@ -279,3 +279,46 @@ async def test_content_safety_check_output_cache_hit(
 
     llm_call_info = llm_call_info_var.get()
     assert llm_call_info.from_cache is True
+
+
+@pytest.mark.asyncio
+async def test_content_safety_check_output_cache_miss(
+    fake_llm_with_stats, mock_task_manager
+):
+    cache = LFUCache(maxsize=10)
+
+    cache_entry = {
+        "result": {"allowed": True, "policy_violations": []},
+        "llm_stats": {
+            "total_tokens": 50,
+            "prompt_tokens": 40,
+            "completion_tokens": 10,
+        },
+        "llm_metadata": None,
+    }
+    cache_key = create_normalized_cache_key("different prompt")
+    cache.put(cache_key, cache_entry)
+
+    mock_task_manager.render_task_prompt.return_value = "new output prompt"
+    mock_task_manager.parse_task_output.return_value = [True, "policy2"]
+
+    llm_stats = LLMStats()
+    llm_stats_var.set(llm_stats)
+
+    llm_call_info = LLMCallInfo(task="content_safety_check_output $model=test_model")
+    llm_call_info_var.set(llm_call_info)
+
+    result = await content_safety_check_output(
+        llms=fake_llm_with_stats,
+        llm_task_manager=mock_task_manager,
+        model_name="test_model",
+        context={"user_message": "new user input", "bot_message": "new bot response"},
+        model_caches={"test_model": cache},
+    )
+
+    assert result["allowed"] is True
+    assert result["policy_violations"] == ["policy2"]
+    assert cache.size() == 2
+
+    llm_call_info = llm_call_info_var.get()
+    assert llm_call_info.from_cache is False

tests/test_topic_safety_cache.py

Lines changed: 42 additions & 0 deletions
@@ -108,6 +108,48 @@ async def test_topic_safety_cache_hit(fake_llm_topic, mock_task_manager):
     assert llm_call_info.from_cache is True
 
 
+@pytest.mark.asyncio
+async def test_topic_safety_cache_miss(fake_llm_topic, mock_task_manager):
+    cache = LFUCache(maxsize=10)
+
+    different_messages = [
+        {"type": "system", "content": "Different prompt"},
+        {"type": "user", "content": "Different question"},
+    ]
+    cache_entry = {
+        "result": {"on_topic": True},
+        "llm_stats": {
+            "total_tokens": 30,
+            "prompt_tokens": 25,
+            "completion_tokens": 5,
+        },
+        "llm_metadata": None,
+    }
+    cache_key = create_normalized_cache_key(different_messages)
+    cache.put(cache_key, cache_entry)
+
+    llm_stats = LLMStats()
+    llm_stats_var.set(llm_stats)
+
+    llm_call_info = LLMCallInfo(task="topic_safety_check_input $model=test_model")
+    llm_call_info_var.set(llm_call_info)
+
+    result = await topic_safety_check_input(
+        llms=fake_llm_topic,
+        llm_task_manager=mock_task_manager,
+        model_name="test_model",
+        context={"user_message": "What is machine learning?"},
+        events=[],
+        model_caches={"test_model": cache},
+    )
+
+    assert result["on_topic"] is True
+    assert cache.size() == 2
+
+    llm_call_info = llm_call_info_var.get()
+    assert llm_call_info.from_cache is False
+
+
 @pytest.mark.asyncio
 async def test_topic_safety_without_cache(fake_llm_topic, mock_task_manager):
     result = await topic_safety_check_input(
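
Both new tests assert the same cache-miss contract: when the rendered prompt normalizes to a key that is not already in the cache, the check falls through to the LLM, llm_call_info.from_cache stays False, and the fresh result is stored as a second entry. The snippet below is a minimal, self-contained sketch of that contract; the LFUCache and create_normalized_cache_key definitions here are simplified stand-ins written only for illustration, not the project's actual implementations.

# Sketch of the cache-miss behaviour asserted above, with simplified
# stand-ins for the project's LFUCache and create_normalized_cache_key.
import hashlib
import json


def create_normalized_cache_key(prompt) -> str:
    # Stand-in: canonicalize the prompt (string or list of messages)
    # to JSON and hash it, so equal prompts map to the same key.
    return hashlib.sha256(
        json.dumps(prompt, sort_keys=True).encode("utf-8")
    ).hexdigest()


class LFUCache:
    # Stand-in exposing only the surface the tests use (put/get/size);
    # a real LFU cache would also track access frequencies and evict.
    def __init__(self, maxsize: int):
        self.maxsize = maxsize
        self._store = {}

    def put(self, key, value):
        self._store[key] = value

    def get(self, key, default=None):
        return self._store.get(key, default)

    def size(self) -> int:
        return len(self._store)


cache = LFUCache(maxsize=10)
cache.put(create_normalized_cache_key("different prompt"), {"result": {"allowed": True}})

# The rendered prompt differs, so its normalized key misses the cache:
key = create_normalized_cache_key("new output prompt")
assert cache.get(key) is None   # miss -> the check falls through to the LLM
cache.put(key, {"result": {"allowed": True, "policy_violations": ["policy2"]}})
assert cache.size() == 2        # the miss stored a second entry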
