@@ -34,8 +34,8 @@ def test_litellm_openai_basic(
     time.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 2
-    span = [s for s in spans if s.name == "litellm.completion"][0]
+    assert len(spans) == 1
+    span = spans[0]
     assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert span.attributes["gen_ai.response.model"] == "gpt-4.1-nano-2025-04-14"
     assert span.attributes["gen_ai.response.id"] == response.id
@@ -79,8 +79,8 @@ def test_litellm_openai_text_block(
     time.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 2
-    span = [s for s in spans if s.name == "litellm.completion"][0]
+    assert len(spans) == 1
+    span = spans[0]
     assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert span.attributes["gen_ai.response.model"] == "gpt-4.1-nano-2025-04-14"
     assert span.attributes["gen_ai.response.id"] == response.id
@@ -124,8 +124,8 @@ def test_litellm_openai_with_streaming(
     time.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 2
-    span = [s for s in spans if s.name == "litellm.completion"][0]
+    assert len(spans) == 1
+    span = spans[0]
     assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert span.attributes["gen_ai.usage.input_tokens"] == 14
     assert span.attributes["gen_ai.usage.output_tokens"] == 7
@@ -178,12 +178,9 @@ def test_litellm_openai_with_chat_history(
     time.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 4
-    inner_spans = [s for s in spans if s.name == "litellm.completion"]
-    assert len(inner_spans) == 2
-    inner_spans = sorted(inner_spans, key=lambda s: s.start_time)
-    first_span = sorted(inner_spans, key=lambda s: s.start_time)[0]
-    second_span = sorted(inner_spans, key=lambda s: s.start_time)[1]
+    assert len(spans) == 2
+    first_span = sorted(spans, key=lambda s: s.start_time)[0]
+    second_span = sorted(spans, key=lambda s: s.start_time)[1]
     assert first_span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert second_span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"

@@ -258,8 +255,8 @@ def test_litellm_openai_with_image_base64(
     time.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 2
-    span = [s for s in spans if s.name == "litellm.completion"][0]
+    assert len(spans) == 1
+    span = spans[0]
     assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert span.attributes["gen_ai.response.model"] == "gpt-4.1-nano-2025-04-14"
     assert span.attributes["gen_ai.response.id"] == response.id
@@ -313,8 +310,8 @@ def test_litellm_openai_with_image_url(
     time.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 2
-    span = [s for s in spans if s.name == "litellm.completion"][0]
+    assert len(spans) == 1
+    span = spans[0]
     assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert span.attributes["gen_ai.response.model"] == "gpt-4.1-nano-2025-04-14"
     assert span.attributes["gen_ai.response.id"] == response.id
@@ -374,8 +371,8 @@ async def test_async_litellm_openai_with_image_base64(
     await asyncio.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 2
-    span = [s for s in spans if s.name == "litellm.completion"][0]
+    assert len(spans) == 1
+    span = spans[0]
     assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert span.attributes["gen_ai.response.model"] == "gpt-4.1-nano-2025-04-14"
     assert span.attributes["gen_ai.response.id"] == response.id
@@ -430,8 +427,8 @@ async def test_async_litellm_openai_with_image_url(
     await asyncio.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 2
-    span = [s for s in spans if s.name == "litellm.completion"][0]
+    assert len(spans) == 1
+    span = spans[0]
     assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert span.attributes["gen_ai.response.model"] == "gpt-4.1-nano-2025-04-14"
     assert span.attributes["gen_ai.response.id"] == response.id
@@ -472,8 +469,8 @@ async def test_async_litellm_openai_basic(
     await asyncio.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 2
-    span = [s for s in spans if s.name == "litellm.completion"][0]
+    assert len(spans) == 1
+    span = spans[0]
     assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert span.attributes["gen_ai.response.model"] == "gpt-4.1-nano-2025-04-14"
     assert span.attributes["gen_ai.response.id"] == response.id
@@ -518,8 +515,8 @@ async def test_async_litellm_openai_text_block(
     await asyncio.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 2
-    span = [s for s in spans if s.name == "litellm.completion"][0]
+    assert len(spans) == 1
+    span = spans[0]
     assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert span.attributes["gen_ai.response.model"] == "gpt-4.1-nano-2025-04-14"
     assert span.attributes["gen_ai.response.id"] == response.id
@@ -564,8 +561,8 @@ async def test_async_litellm_openai_with_streaming(
     await asyncio.sleep(SLEEP_TO_FLUSH_SECONDS)

     spans = span_exporter.get_finished_spans()
-    assert len(spans) == 2
-    span = [s for s in spans if s.name == "litellm.completion"][0]
+    assert len(spans) == 1
+    span = spans[0]
     assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
     assert span.attributes["gen_ai.usage.input_tokens"] == 14
     assert span.attributes["gen_ai.usage.output_tokens"] == 7