@@ -153,25 +153,36 @@ def compute_generation_log(processing_log: List[dict]) -> GenerationLog:
                     action_params=event_data["action_params"],
                     started_at=event["timestamp"],
                 )
-                activated_rail.executed_actions.append(executed_action)
+                if activated_rail is not None:
+                    activated_rail.executed_actions.append(executed_action)
 
             elif event_type == "InternalSystemActionFinished":
                 action_name = event_data["action_name"]
                 if action_name in ignored_actions:
                     continue
 
-                executed_action.finished_at = event["timestamp"]
-                executed_action.duration = (
-                    executed_action.finished_at - executed_action.started_at
-                )
-                executed_action.return_value = event_data["return_value"]
+                if executed_action is not None:
+                    executed_action.finished_at = event["timestamp"]
+                    if (
+                        executed_action.finished_at is not None
+                        and executed_action.started_at is not None
+                    ):
+                        executed_action.duration = (
+                            executed_action.finished_at - executed_action.started_at
+                        )
+                    executed_action.return_value = event_data["return_value"]
                 executed_action = None
 
             elif event_type in ["InputRailFinished", "OutputRailFinished"]:
-                activated_rail.finished_at = event["timestamp"]
-                activated_rail.duration = (
-                    activated_rail.finished_at - activated_rail.started_at
-                )
+                if activated_rail is not None:
+                    activated_rail.finished_at = event["timestamp"]
+                    if (
+                        activated_rail.finished_at is not None
+                        and activated_rail.started_at is not None
+                    ):
+                        activated_rail.duration = (
+                            activated_rail.finished_at - activated_rail.started_at
+                        )
                 activated_rail = None
 
             elif event_type == "InputRailsFinished":
@@ -181,14 +192,21 @@ def compute_generation_log(processing_log: List[dict]) -> GenerationLog:
                 output_rails_finished_at = event["timestamp"]
 
         elif event["type"] == "llm_call_info":
-            executed_action.llm_calls.append(event["data"])
+            if executed_action is not None:
+                executed_action.llm_calls.append(event["data"])
 
     # If at the end of the processing we still have an active rail, it is because
     # we have hit a stop. In this case, we take the last timestamp as the timestamp for
     # finishing the rail.
     if activated_rail is not None:
         activated_rail.finished_at = last_timestamp
-        activated_rail.duration = activated_rail.finished_at - activated_rail.started_at
+        if (
+            activated_rail.finished_at is not None
+            and activated_rail.started_at is not None
+        ):
+            activated_rail.duration = (
+                activated_rail.finished_at - activated_rail.started_at
+            )
 
         if activated_rail.type in ["input", "output"]:
             activated_rail.stop = True
@@ -213,9 +231,13 @@ def compute_generation_log(processing_log: List[dict]) -> GenerationLog:
         if activated_rail.type in ["dialog", "generation"]:
             next_rail = generation_log.activated_rails[i + 1]
             activated_rail.finished_at = next_rail.started_at
-            activated_rail.duration = (
-                activated_rail.finished_at - activated_rail.started_at
-            )
+            if (
+                activated_rail.finished_at is not None
+                and activated_rail.started_at is not None
+            ):
+                activated_rail.duration = (
+                    activated_rail.finished_at - activated_rail.started_at
+                )
 
     # If we have output rails, we also record the general stats
     if output_rails_started_at:
@@ -257,17 +279,21 @@ def compute_generation_log(processing_log: List[dict]) -> GenerationLog:
 
         for executed_action in activated_rail.executed_actions:
             for llm_call in executed_action.llm_calls:
-                generation_log.stats.llm_calls_count += 1
-                generation_log.stats.llm_calls_duration += llm_call.duration
-                generation_log.stats.llm_calls_total_prompt_tokens += (
-                    llm_call.prompt_tokens or 0
-                )
-                generation_log.stats.llm_calls_total_completion_tokens += (
-                    llm_call.completion_tokens or 0
-                )
-                generation_log.stats.llm_calls_total_tokens += (
-                    llm_call.total_tokens or 0
-                )
+                generation_log.stats.llm_calls_count = (
+                    generation_log.stats.llm_calls_count or 0
+                ) + 1
+                generation_log.stats.llm_calls_duration = (
+                    generation_log.stats.llm_calls_duration or 0
+                ) + (llm_call.duration or 0)
+                generation_log.stats.llm_calls_total_prompt_tokens = (
+                    generation_log.stats.llm_calls_total_prompt_tokens or 0
+                ) + (llm_call.prompt_tokens or 0)
+                generation_log.stats.llm_calls_total_completion_tokens = (
+                    generation_log.stats.llm_calls_total_completion_tokens or 0
+                ) + (llm_call.completion_tokens or 0)
+                generation_log.stats.llm_calls_total_tokens = (
+                    generation_log.stats.llm_calls_total_tokens or 0
+                ) + (llm_call.total_tokens or 0)
 
     generation_log.stats.total_duration = (
         processing_log[-1]["timestamp"] - processing_log[0]["timestamp"]
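
The whole change applies one defensive pattern: compute a duration only when both `started_at` and `finished_at` are set, and coerce `None` stats fields to `0` before adding to them. A minimal sketch of that pattern as standalone helpers (the helper names below are hypothetical and not part of this commit):

```python
from typing import Optional


def safe_duration(
    started_at: Optional[float], finished_at: Optional[float]
) -> Optional[float]:
    """Return the elapsed time, or None if either timestamp is missing."""
    if started_at is None or finished_at is None:
        return None
    return finished_at - started_at


def accumulate(current: Optional[float], increment: Optional[float]) -> float:
    """Add an increment to a running total, treating None on either side as 0."""
    return (current or 0) + (increment or 0)


# Usage roughly mirroring the patched code (note the patch skips the
# assignment entirely when a timestamp is missing, rather than storing None):
#   activated_rail.duration = safe_duration(
#       activated_rail.started_at, activated_rail.finished_at
#   )
#   stats.llm_calls_total_tokens = accumulate(
#       stats.llm_calls_total_tokens, llm_call.total_tokens
#   )
```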