import pytest
from unittest.mock import patch, MagicMock
from datetime import datetime, timedelta
from django.utils import timezone
from django.test import TestCase
import pytz

from .models import LLMService, LLMUsage
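
# NOTE: The summary below is inferred from the test bodies themselves, not from any
# separate documentation; it records the behaviour these tests expect from the models:
#   * LLMService rows carry `provider`, `priority` and `is_active`;
#     get_llm_provider_model() walks active services in priority order and returns a
#     (provider, model) pair, or (None, None) when nothing is active.
#   * _get_available_models_for_provider(provider, model_configs) returns bare model
#     names (the 'google-gla:' / 'openai:' prefixes stripped), applying per-model
#     daily request/token limits, OpenAI 'combined_with' quotas, and a US/Pacific
#     day boundary for Gemini; Claude always falls back to ['claude-sonnet-4-0'].
#   * LLMUsage stores per-call `model_name`, token counts and a `date` timestamp.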


@pytest.mark.django_db
class TestLLMService:
    """Test cases for LLMService model and its methods."""

    def setup_method(self):
        """Set up test data for each test method."""
        # Create test LLM services with different priorities
        self.gemini_service = LLMService.objects.create(
            provider='gemini',
            priority=1,
            is_active=True
        )
        self.openai_service = LLMService.objects.create(
            provider='openai',
            priority=2,
            is_active=True
        )
        self.claude_service = LLMService.objects.create(
            provider='claude',
            priority=3,
            is_active=True
        )

    def test_string_representation(self):
        """Test __str__ method of LLMService."""
        assert str(self.gemini_service) == "Gemini (Priority: 1)"
        assert str(self.openai_service) == "OpenAI (Priority: 2)"

    def test_get_llm_provider_model_no_active_services(self):
        """Test get_llm_provider_model when no services are active."""
        LLMService.objects.update(is_active=False)

        provider, model = LLMService.get_llm_provider_model()

        assert provider is None
        assert model is None

    @patch.object(LLMService, '_get_available_models_for_provider')
    def test_get_llm_provider_model_priority_order(self, mock_get_available):
        """Test that get_llm_provider_model respects priority order."""
        # Mock gemini (priority 1) to have no available models
        # Mock openai (priority 2) to have available models
        def side_effect(provider, config):
            if provider == 'gemini':
                return []  # No available models
            elif provider == 'openai':
                return ['gpt-4.1-2025-04-14']
            elif provider == 'claude':
                return ['claude-sonnet-4-0']
            return []

        mock_get_available.side_effect = side_effect

        provider, model = LLMService.get_llm_provider_model()

        assert provider == 'openai'
        assert model == 'gpt-4.1-2025-04-14'

    @patch.object(LLMService, '_get_available_models_for_provider')
    def test_get_llm_provider_model_fallback_to_claude(self, mock_get_available):
        """Test fallback to Claude when other providers are unavailable."""
        def side_effect(provider, config):
            if provider in ['gemini', 'openai']:
                return []  # No available models
            elif provider == 'claude':
                return ['claude-sonnet-4-0']
            return []

        mock_get_available.side_effect = side_effect

        provider, model = LLMService.get_llm_provider_model()

        assert provider == 'claude'
        assert model == 'claude-sonnet-4-0'

    def test_get_available_models_for_provider_gemini_no_usage(self):
        """Test _get_available_models_for_provider for Gemini with no usage."""
        model_configs = {
            'google-gla:gemini-2.5-pro-preview-06-05': {
                'daily_requests': 25,
                'daily_tokens': 1000000,
                'provider': 'gemini'
            },
            'google-gla:gemini-2.5-flash-preview-05-20': {
                'daily_requests': 500,
                'provider': 'gemini'
            }
        }

        available_models = LLMService._get_available_models_for_provider('gemini', model_configs)

        expected_models = ['gemini-2.5-pro-preview-06-05', 'gemini-2.5-flash-preview-05-20']
        assert set(available_models) == set(expected_models)

    def test_get_available_models_for_provider_gemini_with_usage(self):
        """Test _get_available_models_for_provider for Gemini with existing usage."""
        # Create usage that exceeds daily request limit for pro model
        pacific_tz = pytz.timezone('US/Pacific')
        now_pacific = timezone.now().astimezone(pacific_tz)
        start_of_day_pacific = now_pacific.replace(hour=0, minute=0, second=0, microsecond=0)
        start_of_day_utc = start_of_day_pacific.astimezone(pytz.UTC)

        # Create 25 usage records (at the limit)
        for i in range(25):
            LLMUsage.objects.create(
                model_name='google-gla:gemini-2.5-pro-preview-06-05',
                input_tokens=1000,
                output_tokens=500,
                total_tokens=1500,
                date=start_of_day_utc + timedelta(minutes=i)
            )

        model_configs = {
            'google-gla:gemini-2.5-pro-preview-06-05': {
                'daily_requests': 25,
                'daily_tokens': 1000000,
                'provider': 'gemini'
            },
            'google-gla:gemini-2.5-flash-preview-05-20': {
                'daily_requests': 500,
                'provider': 'gemini'
            }
        }

        available_models = LLMService._get_available_models_for_provider('gemini', model_configs)

        # Pro model should be excluded due to request limit, flash should be available
        assert 'gemini-2.5-pro-preview-06-05' not in available_models
        assert 'gemini-2.5-flash-preview-05-20' in available_models

    def test_get_available_models_for_provider_openai_no_usage(self):
        """Test _get_available_models_for_provider for OpenAI with no usage."""
        model_configs = {
            'openai:gpt-4.1-2025-04-14': {
                'daily_tokens': 250000,
                'provider': 'openai',
                'combined_with': ['openai:gpt-4.5-preview-2025-02-27']
            },
            'openai:gpt-4.5-preview-2025-02-27': {
                'daily_tokens': 250000,
                'provider': 'openai',
                'combined_with': ['openai:gpt-4.1-2025-04-14']
            },
            'openai:gpt-4.1-mini-2025-04-14': {
                'daily_tokens': 2500000,
                'provider': 'openai'
            }
        }

        available_models = LLMService._get_available_models_for_provider('openai', model_configs)

        expected_models = ['gpt-4.1-2025-04-14', 'gpt-4.5-preview-2025-02-27', 'gpt-4.1-mini-2025-04-14']
        assert set(available_models) == set(expected_models)

    def test_get_available_models_for_provider_openai_combined_quota_exceeded(self):
        """Test OpenAI combined quota handling when limit is exceeded."""
        # Create usage that exceeds combined quota (90% of 250000)
        today_start = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)

        # Add usage for both combined models totaling 230000 tokens (over 90% limit)
        LLMUsage.objects.create(
            model_name='openai:gpt-4.1-2025-04-14',
            input_tokens=100000,
            output_tokens=50000,
            total_tokens=150000,
            date=today_start + timedelta(hours=1)
        )
        LLMUsage.objects.create(
            model_name='openai:gpt-4.5-preview-2025-02-27',
            input_tokens=60000,
            output_tokens=20000,
            total_tokens=80000,
            date=today_start + timedelta(hours=2)
        )

        model_configs = {
            'openai:gpt-4.1-2025-04-14': {
                'daily_tokens': 250000,
                'provider': 'openai',
                'combined_with': ['openai:gpt-4.5-preview-2025-02-27']
            },
            'openai:gpt-4.5-preview-2025-02-27': {
                'daily_tokens': 250000,
                'provider': 'openai',
                'combined_with': ['openai:gpt-4.1-2025-04-14']
            },
            'openai:gpt-4.1-mini-2025-04-14': {
                'daily_tokens': 2500000,
                'provider': 'openai'
            }
        }

        available_models = LLMService._get_available_models_for_provider('openai', model_configs)

        # Combined quota models should be excluded, mini should be available
        assert 'gpt-4.1-2025-04-14' not in available_models
        assert 'gpt-4.5-preview-2025-02-27' not in available_models
        assert 'gpt-4.1-mini-2025-04-14' in available_models

    def test_get_available_models_for_provider_claude_always_available(self):
        """Test that Claude is always available as fallback."""
        model_configs = {}  # Empty config

        available_models = LLMService._get_available_models_for_provider('claude', model_configs)

        assert available_models == ['claude-sonnet-4-0']

    def test_get_available_models_for_provider_unknown_provider(self):
        """Test behavior with unknown provider."""
        model_configs = {}

        available_models = LLMService._get_available_models_for_provider('unknown', model_configs)

        assert available_models == []

    def test_timezone_handling_gemini_pacific(self):
        """Test that Gemini usage correctly handles Pacific timezone."""
        # Create usage at different Pacific times to test timezone conversion
        pacific_tz = pytz.timezone('US/Pacific')
        utc_tz = pytz.UTC

        # Create usage at 11 PM Pacific (should count as same day)
        pacific_time = pacific_tz.localize(datetime(2024, 1, 15, 23, 0, 0))
        utc_time = pacific_time.astimezone(utc_tz)

        LLMUsage.objects.create(
            model_name='google-gla:gemini-2.5-pro-preview-06-05',
            input_tokens=1000,
            output_tokens=500,
            total_tokens=1500,
            date=utc_time
        )

        # Mock the current time to be next day at 1 AM Pacific
        next_day_pacific = pacific_tz.localize(datetime(2024, 1, 16, 1, 0, 0))

        with patch('django.utils.timezone.now') as mock_now:
            mock_now.return_value = next_day_pacific.astimezone(utc_tz)

            model_configs = {
                'google-gla:gemini-2.5-pro-preview-06-05': {
                    'daily_requests': 25,
                    'provider': 'gemini'
                }
            }

            available_models = LLMService._get_available_models_for_provider('gemini', model_configs)

            # Should be available since it's a new Pacific day
            assert 'gemini-2.5-pro-preview-06-05' in available_models

    def test_meta_options(self):
        """Test model meta options."""
        meta = LLMService._meta
        assert meta.verbose_name == "LLM Service"
        assert meta.verbose_name_plural == "LLM Services"
        assert meta.ordering == ['priority']