diff --git a/langfuse/_client/client.py b/langfuse/_client/client.py index 9beed5f67..c317fbf1b 100644 --- a/langfuse/_client/client.py +++ b/langfuse/_client/client.py @@ -1868,10 +1868,7 @@ def _create_trace_tags_via_ingestion( return try: - new_body = TraceBody( - id=trace_id, - tags=tags, - ) + new_body = TraceBody(id=trace_id, tags=tags, user_id=None, session_id=None) event = { "id": self.create_trace_id(), diff --git a/langfuse/api/.fern/metadata.json b/langfuse/api/.fern/metadata.json index 4f1a155c9..780b22608 100644 --- a/langfuse/api/.fern/metadata.json +++ b/langfuse/api/.fern/metadata.json @@ -1,7 +1,7 @@ { - "cliVersion": "3.30.3", + "cliVersion": "3.88.0", "generatorName": "fernapi/fern-python-sdk", - "generatorVersion": "4.46.2", + "generatorVersion": "4.64.1", "generatorConfig": { "pydantic_config": { "enum_type": "python_enums", diff --git a/langfuse/api/annotation_queues/raw_client.py b/langfuse/api/annotation_queues/raw_client.py index 451095061..535b5fb30 100644 --- a/langfuse/api/annotation_queues/raw_client.py +++ b/langfuse/api/annotation_queues/raw_client.py @@ -12,6 +12,7 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.annotation_queue import AnnotationQueue @@ -29,6 +30,7 @@ ) from .types.paginated_annotation_queue_items import PaginatedAnnotationQueueItems from .types.paginated_annotation_queues import PaginatedAnnotationQueues +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -144,6 +146,13 @@ def list_queues( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -259,6 +268,13 @@ def create_queue( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -360,6 +376,13 @@ def get_queue( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -481,6 +504,13 @@ def list_queue_items( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -589,6 +619,13 @@ def get_queue_item( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -709,6 +746,13 @@ def create_queue_item( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise 
ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -824,6 +868,13 @@ def update_queue_item( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -932,6 +983,13 @@ def delete_queue_item( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1043,6 +1101,13 @@ def create_queue_assignment( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1154,6 +1219,13 @@ def delete_queue_assignment( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1271,6 +1343,13 @@ async def list_queues( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1386,6 +1465,13 @@ async def create_queue( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + 
status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1487,6 +1573,13 @@ async def get_queue( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1608,6 +1701,13 @@ async def list_queue_items( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1716,6 +1816,13 @@ async def get_queue_item( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1836,6 +1943,13 @@ async def create_queue_item( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1951,6 +2065,13 @@ async def update_queue_item( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -2059,6 +2180,13 @@ async def 
delete_queue_item( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -2170,6 +2298,13 @@ async def create_queue_assignment( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -2281,6 +2416,13 @@ async def delete_queue_assignment( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/annotation_queues/types/annotation_queue.py b/langfuse/api/annotation_queues/types/annotation_queue.py index 89cc5d407..8ac7a3c5f 100644 --- a/langfuse/api/annotation_queues/types/annotation_queue.py +++ b/langfuse/api/annotation_queues/types/annotation_queue.py @@ -14,13 +14,15 @@ class AnnotationQueue(UniversalBaseModel): name: str description: typing.Optional[str] = None score_config_ids: typing_extensions.Annotated[ - typing.List[str], FieldMetadata(alias="scoreConfigIds") + typing.List[str], + FieldMetadata(alias="scoreConfigIds"), + pydantic.Field(alias="scoreConfigIds"), ] created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), 
pydantic.Field(alias="updatedAt") ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/annotation_queues/types/annotation_queue_assignment_request.py b/langfuse/api/annotation_queues/types/annotation_queue_assignment_request.py index e25e4a327..04b3169b2 100644 --- a/langfuse/api/annotation_queues/types/annotation_queue_assignment_request.py +++ b/langfuse/api/annotation_queues/types/annotation_queue_assignment_request.py @@ -9,7 +9,9 @@ class AnnotationQueueAssignmentRequest(UniversalBaseModel): - user_id: typing_extensions.Annotated[str, FieldMetadata(alias="userId")] + user_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="userId"), pydantic.Field(alias="userId") + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/annotation_queues/types/annotation_queue_item.py b/langfuse/api/annotation_queues/types/annotation_queue_item.py index 9c4b622d8..050ab5986 100644 --- a/langfuse/api/annotation_queues/types/annotation_queue_item.py +++ b/langfuse/api/annotation_queues/types/annotation_queue_item.py @@ -13,20 +13,28 @@ class AnnotationQueueItem(UniversalBaseModel): id: str - queue_id: typing_extensions.Annotated[str, FieldMetadata(alias="queueId")] - object_id: typing_extensions.Annotated[str, FieldMetadata(alias="objectId")] + queue_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="queueId"), pydantic.Field(alias="queueId") + ] + object_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="objectId"), pydantic.Field(alias="objectId") + ] object_type: typing_extensions.Annotated[ - AnnotationQueueObjectType, FieldMetadata(alias="objectType") + AnnotationQueueObjectType, + FieldMetadata(alias="objectType"), + pydantic.Field(alias="objectType"), ] status: AnnotationQueueStatus completed_at: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="completedAt") - ] = None + 
typing.Optional[dt.datetime], + FieldMetadata(alias="completedAt"), + pydantic.Field(alias="completedAt", default=None), + ] created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/annotation_queues/types/create_annotation_queue_assignment_response.py b/langfuse/api/annotation_queues/types/create_annotation_queue_assignment_response.py index 8f040d3e4..ebab57293 100644 --- a/langfuse/api/annotation_queues/types/create_annotation_queue_assignment_response.py +++ b/langfuse/api/annotation_queues/types/create_annotation_queue_assignment_response.py @@ -9,9 +9,15 @@ class CreateAnnotationQueueAssignmentResponse(UniversalBaseModel): - user_id: typing_extensions.Annotated[str, FieldMetadata(alias="userId")] - queue_id: typing_extensions.Annotated[str, FieldMetadata(alias="queueId")] - project_id: typing_extensions.Annotated[str, FieldMetadata(alias="projectId")] + user_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="userId"), pydantic.Field(alias="userId") + ] + queue_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="queueId"), pydantic.Field(alias="queueId") + ] + project_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="projectId"), pydantic.Field(alias="projectId") + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/annotation_queues/types/create_annotation_queue_item_request.py b/langfuse/api/annotation_queues/types/create_annotation_queue_item_request.py index b81287ce5..fef42a8d9 100644 --- a/langfuse/api/annotation_queues/types/create_annotation_queue_item_request.py 
+++ b/langfuse/api/annotation_queues/types/create_annotation_queue_item_request.py @@ -11,9 +11,13 @@ class CreateAnnotationQueueItemRequest(UniversalBaseModel): - object_id: typing_extensions.Annotated[str, FieldMetadata(alias="objectId")] + object_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="objectId"), pydantic.Field(alias="objectId") + ] object_type: typing_extensions.Annotated[ - AnnotationQueueObjectType, FieldMetadata(alias="objectType") + AnnotationQueueObjectType, + FieldMetadata(alias="objectType"), + pydantic.Field(alias="objectType"), ] status: typing.Optional[AnnotationQueueStatus] = pydantic.Field(default=None) """ diff --git a/langfuse/api/annotation_queues/types/create_annotation_queue_request.py b/langfuse/api/annotation_queues/types/create_annotation_queue_request.py index 1415ad1a7..a53a9deb3 100644 --- a/langfuse/api/annotation_queues/types/create_annotation_queue_request.py +++ b/langfuse/api/annotation_queues/types/create_annotation_queue_request.py @@ -12,7 +12,9 @@ class CreateAnnotationQueueRequest(UniversalBaseModel): name: str description: typing.Optional[str] = None score_config_ids: typing_extensions.Annotated[ - typing.List[str], FieldMetadata(alias="scoreConfigIds") + typing.List[str], + FieldMetadata(alias="scoreConfigIds"), + pydantic.Field(alias="scoreConfigIds"), ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/blob_storage_integrations/raw_client.py b/langfuse/api/blob_storage_integrations/raw_client.py index 30e486209..f40f85b8d 100644 --- a/langfuse/api/blob_storage_integrations/raw_client.py +++ b/langfuse/api/blob_storage_integrations/raw_client.py @@ -13,6 +13,7 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from 
..core.request_options import RequestOptions from .types.blob_storage_export_frequency import BlobStorageExportFrequency @@ -27,6 +28,7 @@ ) from .types.blob_storage_integration_type import BlobStorageIntegrationType from .types.blob_storage_integrations_response import BlobStorageIntegrationsResponse +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -128,6 +130,13 @@ def get_blob_storage_integrations( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -302,6 +311,13 @@ def upsert_blob_storage_integration( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -402,6 +418,13 @@ def get_blob_storage_integration_status( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -502,6 +525,13 @@ def delete_blob_storage_integration( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -605,6 +635,13 @@ async def get_blob_storage_integrations( headers=dict(_response.headers), 
body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -779,6 +816,13 @@ async def upsert_blob_storage_integration( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -879,6 +923,13 @@ async def get_blob_storage_integration_status( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -979,6 +1030,13 @@ async def delete_blob_storage_integration( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/blob_storage_integrations/types/blob_storage_integration_response.py b/langfuse/api/blob_storage_integrations/types/blob_storage_integration_response.py index b3630297b..ffbef10a5 100644 --- a/langfuse/api/blob_storage_integrations/types/blob_storage_integration_response.py +++ b/langfuse/api/blob_storage_integrations/types/blob_storage_integration_response.py @@ -15,49 +15,73 @@ class BlobStorageIntegrationResponse(UniversalBaseModel): id: str - project_id: typing_extensions.Annotated[str, FieldMetadata(alias="projectId")] + project_id: typing_extensions.Annotated[ + str, 
FieldMetadata(alias="projectId"), pydantic.Field(alias="projectId") + ] type: BlobStorageIntegrationType - bucket_name: typing_extensions.Annotated[str, FieldMetadata(alias="bucketName")] + bucket_name: typing_extensions.Annotated[ + str, FieldMetadata(alias="bucketName"), pydantic.Field(alias="bucketName") + ] endpoint: typing.Optional[str] = None region: str access_key_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="accessKeyId") - ] = None + typing.Optional[str], + FieldMetadata(alias="accessKeyId"), + pydantic.Field(alias="accessKeyId", default=None), + ] prefix: str export_frequency: typing_extensions.Annotated[ - BlobStorageExportFrequency, FieldMetadata(alias="exportFrequency") + BlobStorageExportFrequency, + FieldMetadata(alias="exportFrequency"), + pydantic.Field(alias="exportFrequency"), ] enabled: bool force_path_style: typing_extensions.Annotated[ - bool, FieldMetadata(alias="forcePathStyle") + bool, + FieldMetadata(alias="forcePathStyle"), + pydantic.Field(alias="forcePathStyle"), ] file_type: typing_extensions.Annotated[ - BlobStorageIntegrationFileType, FieldMetadata(alias="fileType") + BlobStorageIntegrationFileType, + FieldMetadata(alias="fileType"), + pydantic.Field(alias="fileType"), ] export_mode: typing_extensions.Annotated[ - BlobStorageExportMode, FieldMetadata(alias="exportMode") + BlobStorageExportMode, + FieldMetadata(alias="exportMode"), + pydantic.Field(alias="exportMode"), ] export_start_date: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="exportStartDate") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="exportStartDate"), + pydantic.Field(alias="exportStartDate", default=None), + ] compressed: bool next_sync_at: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="nextSyncAt") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="nextSyncAt"), + pydantic.Field(alias="nextSyncAt", default=None), + ] last_sync_at: 
typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="lastSyncAt") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="lastSyncAt"), + pydantic.Field(alias="lastSyncAt", default=None), + ] last_error: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="lastError") - ] = None + typing.Optional[str], + FieldMetadata(alias="lastError"), + pydantic.Field(alias="lastError", default=None), + ] last_error_at: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="lastErrorAt") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="lastErrorAt"), + pydantic.Field(alias="lastErrorAt", default=None), + ] created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/blob_storage_integrations/types/blob_storage_integration_status_response.py b/langfuse/api/blob_storage_integrations/types/blob_storage_integration_status_response.py index 951074990..229b486c1 100644 --- a/langfuse/api/blob_storage_integrations/types/blob_storage_integration_status_response.py +++ b/langfuse/api/blob_storage_integrations/types/blob_storage_integration_status_response.py @@ -12,38 +12,51 @@ class BlobStorageIntegrationStatusResponse(UniversalBaseModel): id: str - project_id: typing_extensions.Annotated[str, FieldMetadata(alias="projectId")] + project_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="projectId"), pydantic.Field(alias="projectId") + ] sync_status: typing_extensions.Annotated[ - BlobStorageSyncStatus, FieldMetadata(alias="syncStatus") + BlobStorageSyncStatus, + 
FieldMetadata(alias="syncStatus"), + pydantic.Field(alias="syncStatus"), ] enabled: bool last_sync_at: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="lastSyncAt") - ] = pydantic.Field(default=None) - """ - End of the last successfully exported time window. Compare against your ETL bookmark to determine if new data is available. Null if the integration has never synced. - """ - + typing.Optional[dt.datetime], + FieldMetadata(alias="lastSyncAt"), + pydantic.Field( + alias="lastSyncAt", + default=None, + description="End of the last successfully exported time window. Compare against your ETL bookmark to determine if new data is available. Null if the integration has never synced.", + ), + ] next_sync_at: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="nextSyncAt") - ] = pydantic.Field(default=None) - """ - When the next export is scheduled. Null if no sync has occurred yet. - """ - + typing.Optional[dt.datetime], + FieldMetadata(alias="nextSyncAt"), + pydantic.Field( + alias="nextSyncAt", + default=None, + description="When the next export is scheduled. Null if no sync has occurred yet.", + ), + ] last_error: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="lastError") - ] = pydantic.Field(default=None) - """ - Raw error message from the storage provider (S3/Azure/GCS) if the last export failed. Cleared on successful export. - """ - + typing.Optional[str], + FieldMetadata(alias="lastError"), + pydantic.Field( + alias="lastError", + default=None, + description="Raw error message from the storage provider (S3/Azure/GCS) if the last export failed. Cleared on successful export.", + ), + ] last_error_at: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="lastErrorAt") - ] = pydantic.Field(default=None) - """ - When the last error occurred. Cleared on successful export. 
- """ + typing.Optional[dt.datetime], + FieldMetadata(alias="lastErrorAt"), + pydantic.Field( + alias="lastErrorAt", + default=None, + description="When the last error occurred. Cleared on successful export.", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/blob_storage_integrations/types/create_blob_storage_integration_request.py b/langfuse/api/blob_storage_integrations/types/create_blob_storage_integration_request.py index ce23cd246..fb1f29d0d 100644 --- a/langfuse/api/blob_storage_integrations/types/create_blob_storage_integration_request.py +++ b/langfuse/api/blob_storage_integrations/types/create_blob_storage_integration_request.py @@ -14,21 +14,20 @@ class CreateBlobStorageIntegrationRequest(UniversalBaseModel): - project_id: typing_extensions.Annotated[str, FieldMetadata(alias="projectId")] = ( - pydantic.Field() - ) - """ - ID of the project in which to configure the blob storage integration - """ - + project_id: typing_extensions.Annotated[ + str, + FieldMetadata(alias="projectId"), + pydantic.Field( + alias="projectId", + description="ID of the project in which to configure the blob storage integration", + ), + ] type: BlobStorageIntegrationType - bucket_name: typing_extensions.Annotated[str, FieldMetadata(alias="bucketName")] = ( - pydantic.Field() - ) - """ - Name of the storage bucket - """ - + bucket_name: typing_extensions.Annotated[ + str, + FieldMetadata(alias="bucketName"), + pydantic.Field(alias="bucketName", description="Name of the storage bucket"), + ] endpoint: typing.Optional[str] = pydantic.Field(default=None) """ Custom endpoint URL (required for S3_COMPATIBLE type) @@ -40,26 +39,32 @@ class CreateBlobStorageIntegrationRequest(UniversalBaseModel): """ access_key_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="accessKeyId") - ] = pydantic.Field(default=None) - """ - Access key ID for authentication - """ - + 
typing.Optional[str], + FieldMetadata(alias="accessKeyId"), + pydantic.Field( + alias="accessKeyId", + default=None, + description="Access key ID for authentication", + ), + ] secret_access_key: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="secretAccessKey") - ] = pydantic.Field(default=None) - """ - Secret access key for authentication (will be encrypted when stored) - """ - + typing.Optional[str], + FieldMetadata(alias="secretAccessKey"), + pydantic.Field( + alias="secretAccessKey", + default=None, + description="Secret access key for authentication (will be encrypted when stored)", + ), + ] prefix: typing.Optional[str] = pydantic.Field(default=None) """ Path prefix for exported files (must end with forward slash if provided) """ export_frequency: typing_extensions.Annotated[ - BlobStorageExportFrequency, FieldMetadata(alias="exportFrequency") + BlobStorageExportFrequency, + FieldMetadata(alias="exportFrequency"), + pydantic.Field(alias="exportFrequency"), ] enabled: bool = pydantic.Field() """ @@ -67,25 +72,31 @@ class CreateBlobStorageIntegrationRequest(UniversalBaseModel): """ force_path_style: typing_extensions.Annotated[ - bool, FieldMetadata(alias="forcePathStyle") - ] = pydantic.Field() - """ - Use path-style URLs for S3 requests - """ - + bool, + FieldMetadata(alias="forcePathStyle"), + pydantic.Field( + alias="forcePathStyle", description="Use path-style URLs for S3 requests" + ), + ] file_type: typing_extensions.Annotated[ - BlobStorageIntegrationFileType, FieldMetadata(alias="fileType") + BlobStorageIntegrationFileType, + FieldMetadata(alias="fileType"), + pydantic.Field(alias="fileType"), ] export_mode: typing_extensions.Annotated[ - BlobStorageExportMode, FieldMetadata(alias="exportMode") + BlobStorageExportMode, + FieldMetadata(alias="exportMode"), + pydantic.Field(alias="exportMode"), ] export_start_date: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="exportStartDate") - ] = 
pydantic.Field(default=None) - """ - Custom start date for exports (required when exportMode is FROM_CUSTOM_DATE) - """ - + typing.Optional[dt.datetime], + FieldMetadata(alias="exportStartDate"), + pydantic.Field( + alias="exportStartDate", + default=None, + description="Custom start date for exports (required when exportMode is FROM_CUSTOM_DATE)", + ), + ] compressed: typing.Optional[bool] = pydantic.Field(default=None) """ Enable gzip compression for exported files (.csv.gz, .json.gz, .jsonl.gz). Defaults to true. diff --git a/langfuse/api/client.py b/langfuse/api/client.py index 3f656cdcd..b80f51560 100644 --- a/langfuse/api/client.py +++ b/langfuse/api/client.py @@ -6,6 +6,7 @@ import httpx from .core.client_wrapper import AsyncClientWrapper, SyncClientWrapper +from .core.logging import LogConfig, Logger if typing.TYPE_CHECKING: from .annotation_queues.client import ( @@ -69,6 +70,9 @@ class LangfuseAPI: httpx_client : typing.Optional[httpx.Client] The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. + logging : typing.Optional[typing.Union[LogConfig, Logger]] + Configure logging for the SDK. Accepts a LogConfig dict with 'level' (debug/info/warn/error), 'logger' (custom logger implementation), and 'silent' (boolean, defaults to True) fields. You can also pass a pre-configured Logger instance. 
+ Examples -------- from langfuse import LangfuseAPI @@ -96,6 +100,7 @@ def __init__( timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.Client] = None, + logging: typing.Optional[typing.Union[LogConfig, Logger]] = None, ): _defaulted_timeout = ( timeout @@ -120,6 +125,7 @@ def __init__( if follow_redirects is not None else httpx.Client(timeout=_defaulted_timeout), timeout=_defaulted_timeout, + logging=logging, ) self._annotation_queues: typing.Optional[AnnotationQueuesClient] = None self._blob_storage_integrations: typing.Optional[ @@ -385,6 +391,9 @@ class AsyncLangfuseAPI: httpx_client : typing.Optional[httpx.AsyncClient] The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. + logging : typing.Optional[typing.Union[LogConfig, Logger]] + Configure logging for the SDK. Accepts a LogConfig dict with 'level' (debug/info/warn/error), 'logger' (custom logger implementation), and 'silent' (boolean, defaults to True) fields. You can also pass a pre-configured Logger instance. 
+ Examples -------- from langfuse import AsyncLangfuseAPI @@ -412,6 +421,7 @@ def __init__( timeout: typing.Optional[float] = None, follow_redirects: typing.Optional[bool] = True, httpx_client: typing.Optional[httpx.AsyncClient] = None, + logging: typing.Optional[typing.Union[LogConfig, Logger]] = None, ): _defaulted_timeout = ( timeout @@ -436,6 +446,7 @@ def __init__( if follow_redirects is not None else httpx.AsyncClient(timeout=_defaulted_timeout), timeout=_defaulted_timeout, + logging=logging, ) self._annotation_queues: typing.Optional[AsyncAnnotationQueuesClient] = None self._blob_storage_integrations: typing.Optional[ diff --git a/langfuse/api/comments/raw_client.py b/langfuse/api/comments/raw_client.py index 0bb39539a..c79383a9e 100644 --- a/langfuse/api/comments/raw_client.py +++ b/langfuse/api/comments/raw_client.py @@ -13,10 +13,12 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.create_comment_response import CreateCommentResponse from .types.get_comments_response import GetCommentsResponse +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -148,6 +150,13 @@ def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -275,6 +284,13 @@ def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -379,6 +395,13 @@ def get_by_id( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -512,6 +535,13 @@ async def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -639,6 +669,13 @@ async def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -743,6 +780,13 @@ async def get_by_id( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( 
status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/comments/types/create_comment_request.py b/langfuse/api/comments/types/create_comment_request.py index 56ef2794d..a11102f04 100644 --- a/langfuse/api/comments/types/create_comment_request.py +++ b/langfuse/api/comments/types/create_comment_request.py @@ -9,38 +9,44 @@ class CreateCommentRequest(UniversalBaseModel): - project_id: typing_extensions.Annotated[str, FieldMetadata(alias="projectId")] = ( - pydantic.Field() - ) - """ - The id of the project to attach the comment to. - """ - - object_type: typing_extensions.Annotated[str, FieldMetadata(alias="objectType")] = ( - pydantic.Field() - ) - """ - The type of the object to attach the comment to (trace, observation, session, prompt). - """ - - object_id: typing_extensions.Annotated[str, FieldMetadata(alias="objectId")] = ( - pydantic.Field() - ) - """ - The id of the object to attach the comment to. If this does not reference a valid existing object, an error will be thrown. - """ - + project_id: typing_extensions.Annotated[ + str, + FieldMetadata(alias="projectId"), + pydantic.Field( + alias="projectId", + description="The id of the project to attach the comment to.", + ), + ] + object_type: typing_extensions.Annotated[ + str, + FieldMetadata(alias="objectType"), + pydantic.Field( + alias="objectType", + description="The type of the object to attach the comment to (trace, observation, session, prompt).", + ), + ] + object_id: typing_extensions.Annotated[ + str, + FieldMetadata(alias="objectId"), + pydantic.Field( + alias="objectId", + description="The id of the object to attach the comment to. If this does not reference a valid existing object, an error will be thrown.", + ), + ] content: str = pydantic.Field() """ The content of the comment. May include markdown. Currently limited to 5000 characters. 
""" author_user_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="authorUserId") - ] = pydantic.Field(default=None) - """ - The id of the user who created the comment. - """ + typing.Optional[str], + FieldMetadata(alias="authorUserId"), + pydantic.Field( + alias="authorUserId", + default=None, + description="The id of the user who created the comment.", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/commons/types/base_score.py b/langfuse/api/commons/types/base_score.py index 44e09033c..b017db7e9 100644 --- a/langfuse/api/commons/types/base_score.py +++ b/langfuse/api/commons/types/base_score.py @@ -13,49 +13,57 @@ class BaseScore(UniversalBaseModel): id: str trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="traceId") - ] = pydantic.Field(default=None) - """ - The trace ID associated with the score - """ - + typing.Optional[str], + FieldMetadata(alias="traceId"), + pydantic.Field( + alias="traceId", + default=None, + description="The trace ID associated with the score", + ), + ] session_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sessionId") - ] = pydantic.Field(default=None) - """ - The session ID associated with the score - """ - + typing.Optional[str], + FieldMetadata(alias="sessionId"), + pydantic.Field( + alias="sessionId", + default=None, + description="The session ID associated with the score", + ), + ] observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = pydantic.Field(default=None) - """ - The observation ID associated with the score - """ - + typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field( + alias="observationId", + default=None, + description="The observation ID associated with the score", + ), + ] dataset_run_id: typing_extensions.Annotated[ - typing.Optional[str], 
FieldMetadata(alias="datasetRunId") - ] = pydantic.Field(default=None) - """ - The dataset run ID associated with the score - """ - + typing.Optional[str], + FieldMetadata(alias="datasetRunId"), + pydantic.Field( + alias="datasetRunId", + default=None, + description="The dataset run ID associated with the score", + ), + ] name: str source: ScoreSource timestamp: dt.datetime created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] author_user_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="authorUserId") - ] = pydantic.Field(default=None) - """ - The user ID of the author - """ - + typing.Optional[str], + FieldMetadata(alias="authorUserId"), + pydantic.Field( + alias="authorUserId", default=None, description="The user ID of the author" + ), + ] comment: typing.Optional[str] = pydantic.Field(default=None) """ Comment on the score @@ -67,19 +75,23 @@ class BaseScore(UniversalBaseModel): """ config_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="configId") - ] = pydantic.Field(default=None) - """ - Reference a score config on a score. When set, config and score name must be equal and value must comply to optionally defined numerical range - """ - + typing.Optional[str], + FieldMetadata(alias="configId"), + pydantic.Field( + alias="configId", + default=None, + description="Reference a score config on a score. When set, config and score name must be equal and value must comply to optionally defined numerical range", + ), + ] queue_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="queueId") - ] = pydantic.Field(default=None) - """ - The annotation queue referenced by the score. 
Indicates if score was initially created while processing annotation queue. - """ - + typing.Optional[str], + FieldMetadata(alias="queueId"), + pydantic.Field( + alias="queueId", + default=None, + description="The annotation queue referenced by the score. Indicates if score was initially created while processing annotation queue.", + ), + ] environment: str = pydantic.Field() """ The environment from which this score originated. Can be any lowercase alphanumeric string with hyphens and underscores that does not start with 'langfuse'. diff --git a/langfuse/api/commons/types/base_score_v1.py b/langfuse/api/commons/types/base_score_v1.py index 881b10a3b..24e80b3e0 100644 --- a/langfuse/api/commons/types/base_score_v1.py +++ b/langfuse/api/commons/types/base_score_v1.py @@ -12,30 +12,34 @@ class BaseScoreV1(UniversalBaseModel): id: str - trace_id: typing_extensions.Annotated[str, FieldMetadata(alias="traceId")] + trace_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="traceId"), pydantic.Field(alias="traceId") + ] name: str source: ScoreSource observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = pydantic.Field(default=None) - """ - The observation ID associated with the score - """ - + typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field( + alias="observationId", + default=None, + description="The observation ID associated with the score", + ), + ] timestamp: dt.datetime created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] author_user_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="authorUserId") - ] = pydantic.Field(default=None) - """ - The user ID of 
the author - """ - + typing.Optional[str], + FieldMetadata(alias="authorUserId"), + pydantic.Field( + alias="authorUserId", default=None, description="The user ID of the author" + ), + ] comment: typing.Optional[str] = pydantic.Field(default=None) """ Comment on the score @@ -47,19 +51,23 @@ class BaseScoreV1(UniversalBaseModel): """ config_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="configId") - ] = pydantic.Field(default=None) - """ - Reference a score config on a score. When set, config and score name must be equal and value must comply to optionally defined numerical range - """ - + typing.Optional[str], + FieldMetadata(alias="configId"), + pydantic.Field( + alias="configId", + default=None, + description="Reference a score config on a score. When set, config and score name must be equal and value must comply to optionally defined numerical range", + ), + ] queue_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="queueId") - ] = pydantic.Field(default=None) - """ - The annotation queue referenced by the score. Indicates if score was initially created while processing annotation queue. - """ - + typing.Optional[str], + FieldMetadata(alias="queueId"), + pydantic.Field( + alias="queueId", + default=None, + description="The annotation queue referenced by the score. Indicates if score was initially created while processing annotation queue.", + ), + ] environment: str = pydantic.Field() """ The environment from which this score originated. Can be any lowercase alphanumeric string with hyphens and underscores that does not start with 'langfuse'. 
diff --git a/langfuse/api/commons/types/boolean_score.py b/langfuse/api/commons/types/boolean_score.py index 2f65cf338..259b57271 100644 --- a/langfuse/api/commons/types/boolean_score.py +++ b/langfuse/api/commons/types/boolean_score.py @@ -15,11 +15,13 @@ class BooleanScore(BaseScore): """ string_value: typing_extensions.Annotated[ - str, FieldMetadata(alias="stringValue") - ] = pydantic.Field() - """ - The string representation of the score value. Is inferred from the numeric value and equals "True" or "False" - """ + str, + FieldMetadata(alias="stringValue"), + pydantic.Field( + alias="stringValue", + description='The string representation of the score value. Is inferred from the numeric value and equals "True" or "False"', + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/commons/types/boolean_score_v1.py b/langfuse/api/commons/types/boolean_score_v1.py index cf5425255..63af787a8 100644 --- a/langfuse/api/commons/types/boolean_score_v1.py +++ b/langfuse/api/commons/types/boolean_score_v1.py @@ -15,11 +15,13 @@ class BooleanScoreV1(BaseScoreV1): """ string_value: typing_extensions.Annotated[ - str, FieldMetadata(alias="stringValue") - ] = pydantic.Field() - """ - The string representation of the score value. Is inferred from the numeric value and equals "True" or "False" - """ + str, + FieldMetadata(alias="stringValue"), + pydantic.Field( + alias="stringValue", + description='The string representation of the score value. 
Is inferred from the numeric value and equals "True" or "False"', + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/commons/types/categorical_score.py b/langfuse/api/commons/types/categorical_score.py index a12ac58c3..70b5178a5 100644 --- a/langfuse/api/commons/types/categorical_score.py +++ b/langfuse/api/commons/types/categorical_score.py @@ -15,11 +15,13 @@ class CategoricalScore(BaseScore): """ string_value: typing_extensions.Annotated[ - str, FieldMetadata(alias="stringValue") - ] = pydantic.Field() - """ - The string representation of the score value. If no config is linked, can be any string. Otherwise, must map to a config category - """ + str, + FieldMetadata(alias="stringValue"), + pydantic.Field( + alias="stringValue", + description="The string representation of the score value. If no config is linked, can be any string. Otherwise, must map to a config category", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/commons/types/categorical_score_v1.py b/langfuse/api/commons/types/categorical_score_v1.py index 8f98af1a8..116fc29eb 100644 --- a/langfuse/api/commons/types/categorical_score_v1.py +++ b/langfuse/api/commons/types/categorical_score_v1.py @@ -15,11 +15,13 @@ class CategoricalScoreV1(BaseScoreV1): """ string_value: typing_extensions.Annotated[ - str, FieldMetadata(alias="stringValue") - ] = pydantic.Field() - """ - The string representation of the score value. If no config is linked, can be any string. Otherwise, must map to a config category - """ + str, + FieldMetadata(alias="stringValue"), + pydantic.Field( + alias="stringValue", + description="The string representation of the score value. If no config is linked, can be any string. 
Otherwise, must map to a config category", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/commons/types/comment.py b/langfuse/api/commons/types/comment.py index 31daeeed8..2668fc027 100644 --- a/langfuse/api/commons/types/comment.py +++ b/langfuse/api/commons/types/comment.py @@ -12,24 +12,33 @@ class Comment(UniversalBaseModel): id: str - project_id: typing_extensions.Annotated[str, FieldMetadata(alias="projectId")] + project_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="projectId"), pydantic.Field(alias="projectId") + ] created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] object_type: typing_extensions.Annotated[ - CommentObjectType, FieldMetadata(alias="objectType") + CommentObjectType, + FieldMetadata(alias="objectType"), + pydantic.Field(alias="objectType"), + ] + object_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="objectId"), pydantic.Field(alias="objectId") ] - object_id: typing_extensions.Annotated[str, FieldMetadata(alias="objectId")] content: str author_user_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="authorUserId") - ] = pydantic.Field(default=None) - """ - The user ID of the comment author - """ + typing.Optional[str], + FieldMetadata(alias="authorUserId"), + pydantic.Field( + alias="authorUserId", + default=None, + description="The user ID of the comment author", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/commons/types/correction_score.py b/langfuse/api/commons/types/correction_score.py index 9b37071f4..9a6a8a340 100644 
--- a/langfuse/api/commons/types/correction_score.py +++ b/langfuse/api/commons/types/correction_score.py @@ -15,11 +15,13 @@ class CorrectionScore(BaseScore): """ string_value: typing_extensions.Annotated[ - str, FieldMetadata(alias="stringValue") - ] = pydantic.Field() - """ - The string representation of the correction content - """ + str, + FieldMetadata(alias="stringValue"), + pydantic.Field( + alias="stringValue", + description="The string representation of the correction content", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/commons/types/dataset.py b/langfuse/api/commons/types/dataset.py index d312b291a..990fe532e 100644 --- a/langfuse/api/commons/types/dataset.py +++ b/langfuse/api/commons/types/dataset.py @@ -23,25 +23,31 @@ class Dataset(UniversalBaseModel): """ input_schema: typing_extensions.Annotated[ - typing.Optional[typing.Any], FieldMetadata(alias="inputSchema") - ] = pydantic.Field(default=None) - """ - JSON Schema for validating dataset item inputs - """ - + typing.Optional[typing.Any], + FieldMetadata(alias="inputSchema"), + pydantic.Field( + alias="inputSchema", + default=None, + description="JSON Schema for validating dataset item inputs", + ), + ] expected_output_schema: typing_extensions.Annotated[ - typing.Optional[typing.Any], FieldMetadata(alias="expectedOutputSchema") - ] = pydantic.Field(default=None) - """ - JSON Schema for validating dataset item expected outputs - """ - - project_id: typing_extensions.Annotated[str, FieldMetadata(alias="projectId")] + typing.Optional[typing.Any], + FieldMetadata(alias="expectedOutputSchema"), + pydantic.Field( + alias="expectedOutputSchema", + default=None, + description="JSON Schema for validating dataset item expected outputs", + ), + ] + project_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="projectId"), pydantic.Field(alias="projectId") + ] created_at: typing_extensions.Annotated[ - 
dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/commons/types/dataset_item.py b/langfuse/api/commons/types/dataset_item.py index 54a13d81a..69378cd66 100644 --- a/langfuse/api/commons/types/dataset_item.py +++ b/langfuse/api/commons/types/dataset_item.py @@ -19,38 +19,46 @@ class DatasetItem(UniversalBaseModel): """ expected_output: typing_extensions.Annotated[ - typing.Any, FieldMetadata(alias="expectedOutput") - ] = pydantic.Field() - """ - Expected output for the dataset item - """ - + typing.Any, + FieldMetadata(alias="expectedOutput"), + pydantic.Field( + alias="expectedOutput", description="Expected output for the dataset item" + ), + ] metadata: typing.Any = pydantic.Field() """ Metadata associated with the dataset item """ source_trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sourceTraceId") - ] = pydantic.Field(default=None) - """ - The trace ID that sourced this dataset item - """ - + typing.Optional[str], + FieldMetadata(alias="sourceTraceId"), + pydantic.Field( + alias="sourceTraceId", + default=None, + description="The trace ID that sourced this dataset item", + ), + ] source_observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sourceObservationId") - ] = pydantic.Field(default=None) - """ - The observation ID that sourced this dataset item - """ - - dataset_id: typing_extensions.Annotated[str, FieldMetadata(alias="datasetId")] - dataset_name: typing_extensions.Annotated[str, FieldMetadata(alias="datasetName")] + typing.Optional[str], + FieldMetadata(alias="sourceObservationId"), + pydantic.Field( + alias="sourceObservationId", + 
default=None, + description="The observation ID that sourced this dataset item", + ), + ] + dataset_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="datasetId"), pydantic.Field(alias="datasetId") + ] + dataset_name: typing_extensions.Annotated[ + str, FieldMetadata(alias="datasetName"), pydantic.Field(alias="datasetName") + ] created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/commons/types/dataset_run.py b/langfuse/api/commons/types/dataset_run.py index 0b5d8566c..4d5ae236c 100644 --- a/langfuse/api/commons/types/dataset_run.py +++ b/langfuse/api/commons/types/dataset_run.py @@ -30,33 +30,34 @@ class DatasetRun(UniversalBaseModel): Metadata of the dataset run """ - dataset_id: typing_extensions.Annotated[str, FieldMetadata(alias="datasetId")] = ( - pydantic.Field() - ) - """ - Id of the associated dataset - """ - + dataset_id: typing_extensions.Annotated[ + str, + FieldMetadata(alias="datasetId"), + pydantic.Field(alias="datasetId", description="Id of the associated dataset"), + ] dataset_name: typing_extensions.Annotated[ - str, FieldMetadata(alias="datasetName") - ] = pydantic.Field() - """ - Name of the associated dataset - """ - + str, + FieldMetadata(alias="datasetName"), + pydantic.Field( + alias="datasetName", description="Name of the associated dataset" + ), + ] created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") - ] = pydantic.Field() - """ - The date and time when the dataset run was created - """ - + dt.datetime, + FieldMetadata(alias="createdAt"), + pydantic.Field( + alias="createdAt", + description="The date and 
time when the dataset run was created", + ), + ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") - ] = pydantic.Field() - """ - The date and time when the dataset run was last updated - """ + dt.datetime, + FieldMetadata(alias="updatedAt"), + pydantic.Field( + alias="updatedAt", + description="The date and time when the dataset run was last updated", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/commons/types/dataset_run_item.py b/langfuse/api/commons/types/dataset_run_item.py index 1ee364ffa..7c84b8d49 100644 --- a/langfuse/api/commons/types/dataset_run_item.py +++ b/langfuse/api/commons/types/dataset_run_item.py @@ -12,27 +12,33 @@ class DatasetRunItem(UniversalBaseModel): id: str dataset_run_id: typing_extensions.Annotated[ - str, FieldMetadata(alias="datasetRunId") + str, FieldMetadata(alias="datasetRunId"), pydantic.Field(alias="datasetRunId") ] dataset_run_name: typing_extensions.Annotated[ - str, FieldMetadata(alias="datasetRunName") + str, + FieldMetadata(alias="datasetRunName"), + pydantic.Field(alias="datasetRunName"), ] dataset_item_id: typing_extensions.Annotated[ - str, FieldMetadata(alias="datasetItemId") + str, FieldMetadata(alias="datasetItemId"), pydantic.Field(alias="datasetItemId") + ] + trace_id: typing_extensions.Annotated[ + str, FieldMetadata(alias="traceId"), pydantic.Field(alias="traceId") ] - trace_id: typing_extensions.Annotated[str, FieldMetadata(alias="traceId")] observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = pydantic.Field(default=None) - """ - The observation ID associated with this run item - """ - + typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field( + alias="observationId", + default=None, + description="The observation ID associated with this run item", + ), + ] created_at: typing_extensions.Annotated[ - 
dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/commons/types/dataset_run_with_items.py b/langfuse/api/commons/types/dataset_run_with_items.py index b5995dd30..fbff6192c 100644 --- a/langfuse/api/commons/types/dataset_run_with_items.py +++ b/langfuse/api/commons/types/dataset_run_with_items.py @@ -11,7 +11,9 @@ class DatasetRunWithItems(DatasetRun): dataset_run_items: typing_extensions.Annotated[ - typing.List[DatasetRunItem], FieldMetadata(alias="datasetRunItems") + typing.List[DatasetRunItem], + FieldMetadata(alias="datasetRunItems"), + pydantic.Field(alias="datasetRunItems"), ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/commons/types/model.py b/langfuse/api/commons/types/model.py index e09313e8a..73f19f88b 100644 --- a/langfuse/api/commons/types/model.py +++ b/langfuse/api/commons/types/model.py @@ -26,77 +26,92 @@ class Model(UniversalBaseModel): """ id: str - model_name: typing_extensions.Annotated[str, FieldMetadata(alias="modelName")] = ( - pydantic.Field() - ) - """ - Name of the model definition. 
If multiple with the same name exist, they are applied in the following order: (1) custom over built-in, (2) newest according to startTime where model.startTime typing.Dict[str, str]: + import platform + headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", + "X-Fern-Runtime": f"python/{platform.python_version()}", + "X-Fern-Platform": f"{platform.system().lower()}/{platform.release()}", **(self.get_custom_headers() or {}), } username = self._get_username() @@ -79,6 +86,7 @@ def __init__( headers: typing.Optional[typing.Dict[str, str]] = None, base_url: str, timeout: typing.Optional[float] = None, + logging: typing.Optional[typing.Union[LogConfig, Logger]] = None, httpx_client: httpx.Client, ): super().__init__( @@ -90,12 +98,14 @@ def __init__( headers=headers, base_url=base_url, timeout=timeout, + logging=logging, ) self.httpx_client = HttpClient( httpx_client=httpx_client, base_headers=self.get_headers, base_timeout=self.get_timeout, base_url=self.get_base_url, + logging_config=self._logging, ) @@ -111,6 +121,7 @@ def __init__( headers: typing.Optional[typing.Dict[str, str]] = None, base_url: str, timeout: typing.Optional[float] = None, + logging: typing.Optional[typing.Union[LogConfig, Logger]] = None, async_token: typing.Optional[typing.Callable[[], typing.Awaitable[str]]] = None, httpx_client: httpx.AsyncClient, ): @@ -123,6 +134,7 @@ def __init__( headers=headers, base_url=base_url, timeout=timeout, + logging=logging, ) self._async_token = async_token self.httpx_client = AsyncHttpClient( @@ -131,6 +143,7 @@ def __init__( base_timeout=self.get_timeout, base_url=self.get_base_url, async_base_headers=self.async_get_headers, + logging_config=self._logging, ) async def async_get_headers(self) -> typing.Dict[str, str]: diff --git a/langfuse/api/core/datetime_utils.py b/langfuse/api/core/datetime_utils.py index 47344e9d9..4f16a5511 100644 --- a/langfuse/api/core/datetime_utils.py +++ b/langfuse/api/core/datetime_utils.py @@ -1,6 +1,50 @@ # This file was 
auto-generated by Fern from our API Definition. import datetime as dt +from email.utils import parsedate_to_datetime +from typing import Any + +import pydantic + +IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.") + + +def parse_rfc2822_datetime(v: Any) -> dt.datetime: + """ + Parse an RFC 2822 datetime string (e.g., "Wed, 02 Oct 2002 13:00:00 GMT") + into a datetime object. If the value is already a datetime, return it as-is. + Falls back to ISO 8601 parsing if RFC 2822 parsing fails. + """ + if isinstance(v, dt.datetime): + return v + if isinstance(v, str): + try: + return parsedate_to_datetime(v) + except Exception: + pass + # Fallback to ISO 8601 parsing + return dt.datetime.fromisoformat(v.replace("Z", "+00:00")) + raise ValueError(f"Expected str or datetime, got {type(v)}") + + +class Rfc2822DateTime(dt.datetime): + """A datetime subclass that parses RFC 2822 date strings. + + On Pydantic V1, uses __get_validators__ for pre-validation. + On Pydantic V2, uses __get_pydantic_core_schema__ for BeforeValidator-style parsing. 
+ """ + + @classmethod + def __get_validators__(cls): # type: ignore[no-untyped-def] + yield parse_rfc2822_datetime + + @classmethod + def __get_pydantic_core_schema__(cls, _source_type: Any, _handler: Any) -> Any: # type: ignore[override] + from pydantic_core import core_schema + + return core_schema.no_info_before_validator_function( + parse_rfc2822_datetime, core_schema.datetime_schema() + ) def serialize_datetime(v: dt.datetime) -> str: diff --git a/langfuse/api/core/http_client.py b/langfuse/api/core/http_client.py index 3025a49ba..de75f4f68 100644 --- a/langfuse/api/core/http_client.py +++ b/langfuse/api/core/http_client.py @@ -5,7 +5,6 @@ import re import time import typing -import urllib.parse from contextlib import asynccontextmanager, contextmanager from random import random @@ -13,6 +12,7 @@ from .file import File, convert_file_dict_to_httpx_tuples from .force_multipart import FORCE_MULTIPART from .jsonable_encoder import jsonable_encoder +from .logging import LogConfig, Logger, create_logger from .query_encoder import encode_query from .remove_none_from_dict import remove_none_from_dict as remove_none_from_dict from .request_options import RequestOptions @@ -125,6 +125,59 @@ def _should_retry(response: httpx.Response) -> bool: return response.status_code >= 500 or response.status_code in retryable_400s +_SENSITIVE_HEADERS = frozenset( + { + "authorization", + "www-authenticate", + "x-api-key", + "api-key", + "apikey", + "x-api-token", + "x-auth-token", + "auth-token", + "cookie", + "set-cookie", + "proxy-authorization", + "proxy-authenticate", + "x-csrf-token", + "x-xsrf-token", + "x-session-token", + "x-access-token", + } +) + + +def _redact_headers(headers: typing.Dict[str, str]) -> typing.Dict[str, str]: + return { + k: ("[REDACTED]" if k.lower() in _SENSITIVE_HEADERS else v) + for k, v in headers.items() + } + + +def _build_url(base_url: str, path: typing.Optional[str]) -> str: + """ + Build a full URL by joining a base URL with a path. 
+ + This function correctly handles base URLs that contain path prefixes (e.g., tenant-based URLs) + by using string concatenation instead of urllib.parse.urljoin(), which would incorrectly + strip path components when the path starts with '/'. + + Example: + >>> _build_url("https://cloud.example.com/org/tenant/api", "/users") + 'https://cloud.example.com/org/tenant/api/users' + + Args: + base_url: The base URL, which may contain path prefixes. + path: The path to append. Can be None or empty string. + + Returns: + The full URL with base_url and path properly joined. + """ + if not path: + return base_url + return f"{base_url.rstrip('/')}/{path.lstrip('/')}" + + def _maybe_filter_none_from_multipart_data( data: typing.Optional[typing.Any], request_files: typing.Optional[RequestFiles], @@ -200,10 +253,20 @@ def get_request_body( # If both data and json are None, we send json data in the event extra properties are specified json_body = maybe_filter_request_body(json, request_options, omit) - # If you have an empty JSON body, you should just send None - return ( - json_body if json_body != {} else None - ), data_body if data_body != {} else None + has_additional_body_parameters = bool( + request_options is not None + and request_options.get("additional_body_parameters") + ) + + # Only collapse empty dict to None when the body was not explicitly provided + # and there are no additional body parameters. This preserves explicit empty + # bodies (e.g., when an endpoint has a request body type but all fields are optional). 
+ if json_body == {} and json is None and not has_additional_body_parameters: + json_body = None + if data_body == {} and data is None and not has_additional_body_parameters: + data_body = None + + return json_body, data_body class HttpClient: @@ -214,11 +277,13 @@ def __init__( base_timeout: typing.Callable[[], typing.Optional[float]], base_headers: typing.Callable[[], typing.Dict[str, str]], base_url: typing.Optional[typing.Callable[[], str]] = None, + logging_config: typing.Optional[typing.Union[LogConfig, Logger]] = None, ): self.base_url = base_url self.base_timeout = base_timeout self.base_headers = base_headers self.httpx_client = httpx_client + self.logger = create_logger(logging_config) def get_base_url(self, maybe_base_url: typing.Optional[str]) -> str: base_url = maybe_base_url @@ -305,22 +370,34 @@ def request( ) ) + _request_url = _build_url(base_url, path) + _request_headers = jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **( + request_options.get("additional_headers", {}) or {} + if request_options is not None + else {} + ), + } + ) + ) + + if self.logger.is_debug(): + self.logger.debug( + "Making HTTP request", + method=method, + url=_request_url, + headers=_redact_headers(_request_headers), + has_body=json_body is not None or data_body is not None, + ) + response = self.httpx_client.request( method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **self.base_headers(), - **(headers if headers is not None else {}), - **( - request_options.get("additional_headers", {}) or {} - if request_options is not None - else {} - ), - } - ) - ), + url=_request_url, + headers=_request_headers, params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, @@ -349,6 +426,24 @@ def request( omit=omit, ) + if self.logger.is_debug(): + if 200 <= response.status_code < 400: + self.logger.debug( + "HTTP 
request succeeded", + method=method, + url=_request_url, + status_code=response.status_code, + ) + + if self.logger.is_error(): + if response.status_code >= 400: + self.logger.error( + "HTTP request failed with error status", + method=method, + url=_request_url, + status_code=response.status_code, + ) + return response @contextmanager @@ -425,22 +520,33 @@ def stream( ) ) + _request_url = _build_url(base_url, path) + _request_headers = jsonable_encoder( + remove_none_from_dict( + { + **self.base_headers(), + **(headers if headers is not None else {}), + **( + request_options.get("additional_headers", {}) + if request_options is not None + else {} + ), + } + ) + ) + + if self.logger.is_debug(): + self.logger.debug( + "Making streaming HTTP request", + method=method, + url=_request_url, + headers=_redact_headers(_request_headers), + ) + with self.httpx_client.stream( method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **self.base_headers(), - **(headers if headers is not None else {}), - **( - request_options.get("additional_headers", {}) - if request_options is not None - else {} - ), - } - ) - ), + url=_request_url, + headers=_request_headers, params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, @@ -462,12 +568,14 @@ def __init__( async_base_headers: typing.Optional[ typing.Callable[[], typing.Awaitable[typing.Dict[str, str]]] ] = None, + logging_config: typing.Optional[typing.Union[LogConfig, Logger]] = None, ): self.base_url = base_url self.base_timeout = base_timeout self.base_headers = base_headers self.async_base_headers = async_base_headers self.httpx_client = httpx_client + self.logger = create_logger(logging_config) async def _get_headers(self) -> typing.Dict[str, str]: if self.async_base_headers is not None: @@ -562,23 +670,34 @@ async def request( ) ) - # Add the input to each of these and do None-safety checks + _request_url = _build_url(base_url, 
path) + _request_headers = jsonable_encoder( + remove_none_from_dict( + { + **_headers, + **(headers if headers is not None else {}), + **( + request_options.get("additional_headers", {}) or {} + if request_options is not None + else {} + ), + } + ) + ) + + if self.logger.is_debug(): + self.logger.debug( + "Making HTTP request", + method=method, + url=_request_url, + headers=_redact_headers(_request_headers), + has_body=json_body is not None or data_body is not None, + ) + response = await self.httpx_client.request( method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **_headers, - **(headers if headers is not None else {}), - **( - request_options.get("additional_headers", {}) or {} - if request_options is not None - else {} - ), - } - ) - ), + url=_request_url, + headers=_request_headers, params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, @@ -606,6 +725,25 @@ async def request( retries=retries + 1, omit=omit, ) + + if self.logger.is_debug(): + if 200 <= response.status_code < 400: + self.logger.debug( + "HTTP request succeeded", + method=method, + url=_request_url, + status_code=response.status_code, + ) + + if self.logger.is_error(): + if response.status_code >= 400: + self.logger.error( + "HTTP request failed with error status", + method=method, + url=_request_url, + status_code=response.status_code, + ) + return response @asynccontextmanager @@ -685,22 +823,33 @@ async def stream( ) ) + _request_url = _build_url(base_url, path) + _request_headers = jsonable_encoder( + remove_none_from_dict( + { + **_headers, + **(headers if headers is not None else {}), + **( + request_options.get("additional_headers", {}) + if request_options is not None + else {} + ), + } + ) + ) + + if self.logger.is_debug(): + self.logger.debug( + "Making streaming HTTP request", + method=method, + url=_request_url, + headers=_redact_headers(_request_headers), + ) + async with 
self.httpx_client.stream( method=method, - url=urllib.parse.urljoin(f"{base_url}/", path), - headers=jsonable_encoder( - remove_none_from_dict( - { - **_headers, - **(headers if headers is not None else {}), - **( - request_options.get("additional_headers", {}) - if request_options is not None - else {} - ), - } - ) - ), + url=_request_url, + headers=_request_headers, params=_encoded_params if _encoded_params else None, json=json_body, data=data_body, diff --git a/langfuse/api/core/http_response.py b/langfuse/api/core/http_response.py index 2479747e8..00bb1096d 100644 --- a/langfuse/api/core/http_response.py +++ b/langfuse/api/core/http_response.py @@ -9,7 +9,7 @@ class BaseHttpResponse: - """Minimalist HTTP response wrapper that exposes response headers.""" + """Minimalist HTTP response wrapper that exposes response headers and status code.""" _response: httpx.Response @@ -20,6 +20,10 @@ def __init__(self, response: httpx.Response): def headers(self) -> Dict[str, str]: return dict(self._response.headers) + @property + def status_code(self) -> int: + return self._response.status_code + class HttpResponse(Generic[T], BaseHttpResponse): """HTTP response wrapper that exposes response headers and data.""" diff --git a/langfuse/api/core/jsonable_encoder.py b/langfuse/api/core/jsonable_encoder.py index 90f53dfa7..d2d77880b 100644 --- a/langfuse/api/core/jsonable_encoder.py +++ b/langfuse/api/core/jsonable_encoder.py @@ -32,6 +32,10 @@ def jsonable_encoder( obj: Any, custom_encoder: Optional[Dict[Any, Callable[[Any], Any]]] = None ) -> Any: custom_encoder = custom_encoder or {} + # Generated SDKs use Ellipsis (`...`) as the sentinel value for "OMIT". + # OMIT values should be excluded from serialized payloads. 
+ if obj is Ellipsis: + return None if custom_encoder: if type(obj) in custom_encoder: return custom_encoder[type(obj)](obj) @@ -72,6 +76,8 @@ def jsonable_encoder( allowed_keys = set(obj.keys()) for key, value in obj.items(): if key in allowed_keys: + if value is Ellipsis: + continue encoded_key = jsonable_encoder(key, custom_encoder=custom_encoder) encoded_value = jsonable_encoder(value, custom_encoder=custom_encoder) encoded_dict[encoded_key] = encoded_value @@ -79,6 +85,8 @@ def jsonable_encoder( if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)): encoded_list = [] for item in obj: + if item is Ellipsis: + continue encoded_list.append(jsonable_encoder(item, custom_encoder=custom_encoder)) return encoded_list diff --git a/langfuse/api/core/logging.py b/langfuse/api/core/logging.py new file mode 100644 index 000000000..b879d5c00 --- /dev/null +++ b/langfuse/api/core/logging.py @@ -0,0 +1,109 @@ +# This file was auto-generated by Fern from our API Definition. + +import logging +import typing + +LogLevel = typing.Literal["debug", "info", "warn", "error"] + +_LOG_LEVEL_MAP: typing.Dict[LogLevel, int] = { + "debug": 1, + "info": 2, + "warn": 3, + "error": 4, +} + + +class ILogger(typing.Protocol): + def debug(self, message: str, **kwargs: typing.Any) -> None: ... + def info(self, message: str, **kwargs: typing.Any) -> None: ... + def warn(self, message: str, **kwargs: typing.Any) -> None: ... + def error(self, message: str, **kwargs: typing.Any) -> None: ... 
+ + +class ConsoleLogger: + _logger: logging.Logger + + def __init__(self) -> None: + self._logger = logging.getLogger("fern") + if not self._logger.handlers: + handler = logging.StreamHandler() + handler.setFormatter(logging.Formatter("%(levelname)s - %(message)s")) + self._logger.addHandler(handler) + self._logger.setLevel(logging.DEBUG) + + def debug(self, message: str, **kwargs: typing.Any) -> None: + self._logger.debug(message, extra=kwargs) + + def info(self, message: str, **kwargs: typing.Any) -> None: + self._logger.info(message, extra=kwargs) + + def warn(self, message: str, **kwargs: typing.Any) -> None: + self._logger.warning(message, extra=kwargs) + + def error(self, message: str, **kwargs: typing.Any) -> None: + self._logger.error(message, extra=kwargs) + + +class LogConfig(typing.TypedDict, total=False): + level: LogLevel + logger: ILogger + silent: bool + + +class Logger: + _level: int + _logger: ILogger + _silent: bool + + def __init__(self, *, level: LogLevel, logger: ILogger, silent: bool) -> None: + self._level = _LOG_LEVEL_MAP[level] + self._logger = logger + self._silent = silent + + def _should_log(self, level: LogLevel) -> bool: + return not self._silent and self._level <= _LOG_LEVEL_MAP[level] + + def is_debug(self) -> bool: + return self._should_log("debug") + + def is_info(self) -> bool: + return self._should_log("info") + + def is_warn(self) -> bool: + return self._should_log("warn") + + def is_error(self) -> bool: + return self._should_log("error") + + def debug(self, message: str, **kwargs: typing.Any) -> None: + if self.is_debug(): + self._logger.debug(message, **kwargs) + + def info(self, message: str, **kwargs: typing.Any) -> None: + if self.is_info(): + self._logger.info(message, **kwargs) + + def warn(self, message: str, **kwargs: typing.Any) -> None: + if self.is_warn(): + self._logger.warn(message, **kwargs) + + def error(self, message: str, **kwargs: typing.Any) -> None: + if self.is_error(): + self._logger.error(message, 
**kwargs) + + +_default_logger: Logger = Logger(level="info", logger=ConsoleLogger(), silent=True) + + +def create_logger( + config: typing.Optional[typing.Union[LogConfig, Logger]] = None, +) -> Logger: + if config is None: + return _default_logger + if isinstance(config, Logger): + return config + return Logger( + level=config.get("level", "info"), + logger=config.get("logger", ConsoleLogger()), + silent=config.get("silent", True), + ) diff --git a/langfuse/api/core/parse_error.py b/langfuse/api/core/parse_error.py new file mode 100644 index 000000000..4527c6a8a --- /dev/null +++ b/langfuse/api/core/parse_error.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. + +from typing import Any, Dict, Optional + + +class ParsingError(Exception): + """ + Raised when the SDK fails to parse/validate a response from the server. + This typically indicates that the server returned a response whose shape + does not match the expected schema. + """ + + headers: Optional[Dict[str, str]] + status_code: Optional[int] + body: Any + cause: Optional[Exception] + + def __init__( + self, + *, + headers: Optional[Dict[str, str]] = None, + status_code: Optional[int] = None, + body: Any = None, + cause: Optional[Exception] = None, + ) -> None: + self.headers = headers + self.status_code = status_code + self.body = body + self.cause = cause + super().__init__() + if cause is not None: + self.__cause__ = cause + + def __str__(self) -> str: + cause_str = f", cause: {self.cause}" if self.cause is not None else "" + return f"headers: {self.headers}, status_code: {self.status_code}, body: {self.body}{cause_str}" diff --git a/langfuse/api/core/pydantic_utilities.py b/langfuse/api/core/pydantic_utilities.py index d2b7b51b6..9693a8781 100644 --- a/langfuse/api/core/pydantic_utilities.py +++ b/langfuse/api/core/pydantic_utilities.py @@ -2,8 +2,13 @@ # nopycln: file import datetime as dt +import inspect +import json +import logging from collections import defaultdict 
+from dataclasses import asdict from typing import ( + TYPE_CHECKING, Any, Callable, ClassVar, @@ -20,26 +25,106 @@ ) import pydantic +import typing_extensions +from pydantic.fields import FieldInfo as _FieldInfo + +_logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from .http_sse._models import ServerSentEvent IS_PYDANTIC_V2 = pydantic.VERSION.startswith("2.") if IS_PYDANTIC_V2: - from pydantic.v1.datetime_parse import parse_date as parse_date - from pydantic.v1.datetime_parse import parse_datetime as parse_datetime - from pydantic.v1.fields import ModelField as ModelField - from pydantic.v1.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore[attr-defined] - from pydantic.v1.typing import get_args as get_args - from pydantic.v1.typing import get_origin as get_origin - from pydantic.v1.typing import is_literal_type as is_literal_type - from pydantic.v1.typing import is_union as is_union + _datetime_adapter = pydantic.TypeAdapter(dt.datetime) # type: ignore[attr-defined] + _date_adapter = pydantic.TypeAdapter(dt.date) # type: ignore[attr-defined] + + def parse_datetime(value: Any) -> dt.datetime: # type: ignore[misc] + if isinstance(value, dt.datetime): + return value + return _datetime_adapter.validate_python(value) + + def parse_date(value: Any) -> dt.date: # type: ignore[misc] + if isinstance(value, dt.datetime): + return value.date() + if isinstance(value, dt.date): + return value + return _date_adapter.validate_python(value) + + # Avoid importing from pydantic.v1 to maintain Python 3.14 compatibility. 
+ from typing import get_args as get_args # type: ignore[assignment] + from typing import get_origin as get_origin # type: ignore[assignment] + + def is_literal_type(tp: Optional[Type[Any]]) -> bool: # type: ignore[misc] + return typing_extensions.get_origin(tp) is typing_extensions.Literal + + def is_union(tp: Optional[Type[Any]]) -> bool: # type: ignore[misc] + return tp is Union or typing_extensions.get_origin(tp) is Union # type: ignore[comparison-overlap] + + # Inline encoders_by_type to avoid importing from pydantic.v1.json + import re as _re + from collections import deque as _deque + from decimal import Decimal as _Decimal + from enum import Enum as _Enum + from ipaddress import ( + IPv4Address as _IPv4Address, + ) + from ipaddress import ( + IPv4Interface as _IPv4Interface, + ) + from ipaddress import ( + IPv4Network as _IPv4Network, + ) + from ipaddress import ( + IPv6Address as _IPv6Address, + ) + from ipaddress import ( + IPv6Interface as _IPv6Interface, + ) + from ipaddress import ( + IPv6Network as _IPv6Network, + ) + from pathlib import Path as _Path + from types import GeneratorType as _GeneratorType + from uuid import UUID as _UUID + + from pydantic.fields import FieldInfo as ModelField # type: ignore[no-redef, assignment] + + def _decimal_encoder(dec_value: Any) -> Any: + if dec_value.as_tuple().exponent >= 0: + return int(dec_value) + return float(dec_value) + + encoders_by_type: Dict[Type[Any], Callable[[Any], Any]] = { # type: ignore[no-redef] + bytes: lambda o: o.decode(), + dt.date: lambda o: o.isoformat(), + dt.datetime: lambda o: o.isoformat(), + dt.time: lambda o: o.isoformat(), + dt.timedelta: lambda td: td.total_seconds(), + _Decimal: _decimal_encoder, + _Enum: lambda o: o.value, + frozenset: list, + _deque: list, + _GeneratorType: list, + _IPv4Address: str, + _IPv4Interface: str, + _IPv4Network: str, + _IPv6Address: str, + _IPv6Interface: str, + _IPv6Network: str, + _Path: str, + _re.Pattern: lambda o: o.pattern, + set: list, + _UUID: 
str, + } else: from pydantic.datetime_parse import parse_date as parse_date # type: ignore[no-redef] from pydantic.datetime_parse import parse_datetime as parse_datetime # type: ignore[no-redef] - from pydantic.fields import ModelField as ModelField # type: ignore[attr-defined, no-redef] + from pydantic.fields import ModelField as ModelField # type: ignore[attr-defined, no-redef, assignment] from pydantic.json import ENCODERS_BY_TYPE as encoders_by_type # type: ignore[no-redef] from pydantic.typing import get_args as get_args # type: ignore[no-redef] from pydantic.typing import get_origin as get_origin # type: ignore[no-redef] - from pydantic.typing import is_literal_type as is_literal_type # type: ignore[no-redef] + from pydantic.typing import is_literal_type as is_literal_type # type: ignore[no-redef, assignment] from pydantic.typing import is_union as is_union # type: ignore[no-redef] from .datetime_utils import serialize_datetime @@ -50,10 +135,220 @@ Model = TypeVar("Model", bound=pydantic.BaseModel) +def _get_discriminator_and_variants( + type_: Type[Any], +) -> Tuple[Optional[str], Optional[List[Type[Any]]]]: + """ + Extract the discriminator field name and union variants from a discriminated union type. + Supports Annotated[Union[...], Field(discriminator=...)] patterns. + Returns (discriminator, variants) or (None, None) if not a discriminated union. 
+ """ + origin = typing_extensions.get_origin(type_) + + if origin is typing_extensions.Annotated: + args = typing_extensions.get_args(type_) + if len(args) >= 2: + inner_type = args[0] + # Check annotations for discriminator + discriminator = None + for annotation in args[1:]: + if hasattr(annotation, "discriminator"): + discriminator = getattr(annotation, "discriminator", None) + break + + if discriminator: + inner_origin = typing_extensions.get_origin(inner_type) + if inner_origin is Union: + variants = list(typing_extensions.get_args(inner_type)) + return discriminator, variants + return None, None + + +def _get_field_annotation(model: Type[Any], field_name: str) -> Optional[Type[Any]]: + """Get the type annotation of a field from a Pydantic model.""" + if IS_PYDANTIC_V2: + fields = getattr(model, "model_fields", {}) + field_info = fields.get(field_name) + if field_info: + return cast(Optional[Type[Any]], field_info.annotation) + else: + fields = getattr(model, "__fields__", {}) + field_info = fields.get(field_name) + if field_info: + return cast(Optional[Type[Any]], field_info.outer_type_) + return None + + +def _find_variant_by_discriminator( + variants: List[Type[Any]], + discriminator: str, + discriminator_value: Any, +) -> Optional[Type[Any]]: + """Find the union variant that matches the discriminator value.""" + for variant in variants: + if not (inspect.isclass(variant) and issubclass(variant, pydantic.BaseModel)): + continue + + disc_annotation = _get_field_annotation(variant, discriminator) + if disc_annotation and is_literal_type(disc_annotation): + literal_args = get_args(disc_annotation) + if literal_args and literal_args[0] == discriminator_value: + return variant + return None + + +def _is_string_type(type_: Type[Any]) -> bool: + """Check if a type is str or Optional[str].""" + if type_ is str: + return True + + origin = typing_extensions.get_origin(type_) + if origin is Union: + args = typing_extensions.get_args(type_) + # Optional[str] = 
Union[str, None] + non_none_args = [a for a in args if a is not type(None)] + if len(non_none_args) == 1 and non_none_args[0] is str: + return True + + return False + + +def parse_sse_obj(sse: "ServerSentEvent", type_: Type[T]) -> T: + """ + Parse a ServerSentEvent into the appropriate type. + + Handles two scenarios based on where the discriminator field is located: + + 1. Data-level discrimination: The discriminator (e.g., 'type') is inside the 'data' payload. + The union describes the data content, not the SSE envelope. + -> Returns: json.loads(data) parsed into the type + + Example: ChatStreamResponse with discriminator='type' + Input: ServerSentEvent(event="message", data='{"type": "content-delta", ...}', id="") + Output: ContentDeltaEvent (parsed from data, SSE envelope stripped) + + 2. Event-level discrimination: The discriminator (e.g., 'event') is at the SSE event level. + The union describes the full SSE event structure. + -> Returns: SSE envelope with 'data' field JSON-parsed only if the variant expects non-string + + Example: JobStreamResponse with discriminator='event' + Input: ServerSentEvent(event="ERROR", data='{"code": "FAILED", ...}', id="123") + Output: JobStreamResponse_Error with data as ErrorData object + + But for variants where data is str (like STATUS_UPDATE): + Input: ServerSentEvent(event="STATUS_UPDATE", data='{"status": "processing"}', id="1") + Output: JobStreamResponse_StatusUpdate with data as string (not parsed) + + Args: + sse: The ServerSentEvent object to parse + type_: The target discriminated union type + + Returns: + The parsed object of type T + + Note: + This function is only available in SDK contexts where http_sse module exists. 
+ """ + sse_event = asdict(sse) + discriminator, variants = _get_discriminator_and_variants(type_) + + if discriminator is None or variants is None: + # Not a discriminated union - parse the data field as JSON + data_value = sse_event.get("data") + if isinstance(data_value, str) and data_value: + try: + parsed_data = json.loads(data_value) + return parse_obj_as(type_, parsed_data) + except json.JSONDecodeError as e: + _logger.warning( + "Failed to parse SSE data field as JSON: %s, data: %s", + e, + data_value[:100] if len(data_value) > 100 else data_value, + ) + return parse_obj_as(type_, sse_event) + + data_value = sse_event.get("data") + + # Check if discriminator is at the top level (event-level discrimination) + if discriminator in sse_event: + # Case 2: Event-level discrimination + # Find the matching variant to check if 'data' field needs JSON parsing + disc_value = sse_event.get(discriminator) + matching_variant = _find_variant_by_discriminator( + variants, discriminator, disc_value + ) + + if matching_variant is not None: + # Check what type the variant expects for 'data' + data_type = _get_field_annotation(matching_variant, "data") + if data_type is not None and not _is_string_type(data_type): + # Variant expects non-string data - parse JSON + if isinstance(data_value, str) and data_value: + try: + parsed_data = json.loads(data_value) + new_object = dict(sse_event) + new_object["data"] = parsed_data + return parse_obj_as(type_, new_object) + except json.JSONDecodeError as e: + _logger.warning( + "Failed to parse SSE data field as JSON for event-level discrimination: %s, data: %s", + e, + data_value[:100] if len(data_value) > 100 else data_value, + ) + # Either no matching variant, data is string type, or JSON parse failed + return parse_obj_as(type_, sse_event) + + else: + # Case 1: Data-level discrimination + # The discriminator is inside the data payload - extract and parse data only + if isinstance(data_value, str) and data_value: + try: + parsed_data = 
json.loads(data_value) + return parse_obj_as(type_, parsed_data) + except json.JSONDecodeError as e: + _logger.warning( + "Failed to parse SSE data field as JSON for data-level discrimination: %s, data: %s", + e, + data_value[:100] if len(data_value) > 100 else data_value, + ) + return parse_obj_as(type_, sse_event) + + def parse_obj_as(type_: Type[T], object_: Any) -> T: - dealiased_object = convert_and_respect_annotation_metadata( - object_=object_, annotation=type_, direction="read" - ) + # convert_and_respect_annotation_metadata is required for TypedDict aliasing. + # + # For Pydantic models, whether we should pre-dealias depends on how the model encodes aliasing: + # - If the model uses real Pydantic aliases (pydantic.Field(alias=...)), then we must pass wire keys through + # unchanged so Pydantic can validate them. + # - If the model encodes aliasing only via FieldMetadata annotations, then we MUST pre-dealias because Pydantic + # will not recognize those aliases during validation. 
+ if inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel): + has_pydantic_aliases = False + if IS_PYDANTIC_V2: + for field_name, field_info in getattr(type_, "model_fields", {}).items(): # type: ignore[attr-defined] + alias = getattr(field_info, "alias", None) + if alias is not None and alias != field_name: + has_pydantic_aliases = True + break + else: + for field in getattr(type_, "__fields__", {}).values(): + alias = getattr(field, "alias", None) + name = getattr(field, "name", None) + if alias is not None and name is not None and alias != name: + has_pydantic_aliases = True + break + + dealiased_object = ( + object_ + if has_pydantic_aliases + else convert_and_respect_annotation_metadata( + object_=object_, annotation=type_, direction="read" + ) + ) + else: + dealiased_object = convert_and_respect_annotation_metadata( + object_=object_, annotation=type_, direction="read" + ) if IS_PYDANTIC_V2: adapter = pydantic.TypeAdapter(type_) # type: ignore[attr-defined] return adapter.validate_python(dealiased_object) @@ -77,6 +372,45 @@ class UniversalBaseModel(pydantic.BaseModel): protected_namespaces=(), ) + @pydantic.model_validator(mode="before") # type: ignore[attr-defined] + @classmethod + def _coerce_field_names_to_aliases(cls, data: Any) -> Any: + """ + Accept Python field names in input by rewriting them to their Pydantic aliases, + while avoiding silent collisions when a key could refer to multiple fields. + """ + if not isinstance(data, Mapping): + return data + + fields = getattr(cls, "model_fields", {}) # type: ignore[attr-defined] + name_to_alias: Dict[str, str] = {} + alias_to_name: Dict[str, str] = {} + + for name, field_info in fields.items(): + alias = getattr(field_info, "alias", None) or name + name_to_alias[name] = alias + if alias != name: + alias_to_name[alias] = name + + # Detect ambiguous keys: a key that is an alias for one field and a name for another. 
+ ambiguous_keys = set(alias_to_name.keys()).intersection( + set(name_to_alias.keys()) + ) + for key in ambiguous_keys: + if key in data and name_to_alias[key] not in data: + raise ValueError( + f"Ambiguous input key '{key}': it is both a field name and an alias. " + "Provide the explicit alias key to disambiguate." + ) + + original_keys = set(data.keys()) + rewritten: Dict[str, Any] = dict(data) + for name, alias in name_to_alias.items(): + if alias != name and name in original_keys and alias not in rewritten: + rewritten[alias] = rewritten.pop(name) + + return rewritten + @pydantic.model_serializer(mode="plain", when_used="json") # type: ignore[attr-defined] def serialize_model(self) -> Any: # type: ignore[name-defined] serialized = self.dict() # type: ignore[attr-defined] @@ -92,6 +426,42 @@ class Config: smart_union = True json_encoders = {dt.datetime: serialize_datetime} + @pydantic.root_validator(pre=True) + def _coerce_field_names_to_aliases(cls, values: Any) -> Any: + """ + Pydantic v1 equivalent of _coerce_field_names_to_aliases. + """ + if not isinstance(values, Mapping): + return values + + fields = getattr(cls, "__fields__", {}) + name_to_alias: Dict[str, str] = {} + alias_to_name: Dict[str, str] = {} + + for name, field in fields.items(): + alias = getattr(field, "alias", None) or name + name_to_alias[name] = alias + if alias != name: + alias_to_name[alias] = name + + ambiguous_keys = set(alias_to_name.keys()).intersection( + set(name_to_alias.keys()) + ) + for key in ambiguous_keys: + if key in values and name_to_alias[key] not in values: + raise ValueError( + f"Ambiguous input key '{key}': it is both a field name and an alias. " + "Provide the explicit alias key to disambiguate." 
+ ) + + original_keys = set(values.keys()) + rewritten: Dict[str, Any] = dict(values) + for name, alias in name_to_alias.items(): + if alias != name and name in original_keys and alias not in rewritten: + rewritten[alias] = rewritten.pop(name) + + return rewritten + @classmethod def model_construct( cls: Type["Model"], _fields_set: Optional[Set[str]] = None, **values: Any @@ -287,7 +657,7 @@ def decorator(func: AnyCallable) -> AnyCallable: return decorator -PydanticField = Union[ModelField, pydantic.fields.FieldInfo] +PydanticField = Union[ModelField, _FieldInfo] def _get_model_fields(model: Type["Model"]) -> Mapping[str, PydanticField]: diff --git a/langfuse/api/dataset_items/raw_client.py b/langfuse/api/dataset_items/raw_client.py index 6aeafb54d..121889c7c 100644 --- a/langfuse/api/dataset_items/raw_client.py +++ b/langfuse/api/dataset_items/raw_client.py @@ -16,10 +16,12 @@ from ..core.datetime_utils import serialize_datetime from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.delete_dataset_item_response import DeleteDatasetItemResponse from .types.paginated_dataset_items import PaginatedDatasetItems +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -160,6 +162,13 @@ def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -260,6 +269,13 @@ def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -392,6 +408,13 @@ def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -492,6 +515,13 @@ def delete( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -634,6 +664,13 @@ async def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -734,6 +771,13 @@ async def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, 
headers=dict(_response.headers), @@ -866,6 +910,13 @@ async def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -966,6 +1017,13 @@ async def delete( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/dataset_items/types/create_dataset_item_request.py b/langfuse/api/dataset_items/types/create_dataset_item_request.py index b778e42ae..61b49097c 100644 --- a/langfuse/api/dataset_items/types/create_dataset_item_request.py +++ b/langfuse/api/dataset_items/types/create_dataset_item_request.py @@ -10,18 +10,26 @@ class CreateDatasetItemRequest(UniversalBaseModel): - dataset_name: typing_extensions.Annotated[str, FieldMetadata(alias="datasetName")] + dataset_name: typing_extensions.Annotated[ + str, FieldMetadata(alias="datasetName"), pydantic.Field(alias="datasetName") + ] input: typing.Optional[typing.Any] = None expected_output: typing_extensions.Annotated[ - typing.Optional[typing.Any], FieldMetadata(alias="expectedOutput") - ] = None + typing.Optional[typing.Any], + FieldMetadata(alias="expectedOutput"), + pydantic.Field(alias="expectedOutput", default=None), + ] metadata: typing.Optional[typing.Any] = None source_trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sourceTraceId") - ] = None + typing.Optional[str], + FieldMetadata(alias="sourceTraceId"), + pydantic.Field(alias="sourceTraceId", default=None), + ] source_observation_id: typing_extensions.Annotated[ - typing.Optional[str], 
FieldMetadata(alias="sourceObservationId") - ] = None + typing.Optional[str], + FieldMetadata(alias="sourceObservationId"), + pydantic.Field(alias="sourceObservationId", default=None), + ] id: typing.Optional[str] = pydantic.Field(default=None) """ Dataset items are upserted on their id. Id needs to be unique (project-level) and cannot be reused across datasets. diff --git a/langfuse/api/dataset_run_items/raw_client.py b/langfuse/api/dataset_run_items/raw_client.py index f281b13d2..0afd6ce3f 100644 --- a/langfuse/api/dataset_run_items/raw_client.py +++ b/langfuse/api/dataset_run_items/raw_client.py @@ -13,9 +13,11 @@ from ..core.api_error import ApiError from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.paginated_dataset_run_items import PaginatedDatasetRunItems +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -162,6 +164,13 @@ def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -282,6 +291,13 @@ def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -430,6 +446,13 @@ async def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -550,6 +573,13 @@ async def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/dataset_run_items/types/create_dataset_run_item_request.py b/langfuse/api/dataset_run_items/types/create_dataset_run_item_request.py index 169888912..2a916ef84 100644 --- a/langfuse/api/dataset_run_items/types/create_dataset_run_item_request.py +++ b/langfuse/api/dataset_run_items/types/create_dataset_run_item_request.py @@ -10,48 +10,58 @@ class CreateDatasetRunItemRequest(UniversalBaseModel): - run_name: typing_extensions.Annotated[str, FieldMetadata(alias="runName")] + run_name: typing_extensions.Annotated[ + str, FieldMetadata(alias="runName"), pydantic.Field(alias="runName") + ] run_description: 
typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="runDescription") - ] = pydantic.Field(default=None) - """ - Description of the run. If run exists, description will be updated. - """ - + typing.Optional[str], + FieldMetadata(alias="runDescription"), + pydantic.Field( + alias="runDescription", + default=None, + description="Description of the run. If run exists, description will be updated.", + ), + ] metadata: typing.Optional[typing.Any] = pydantic.Field(default=None) """ Metadata of the dataset run, updates run if run already exists """ dataset_item_id: typing_extensions.Annotated[ - str, FieldMetadata(alias="datasetItemId") + str, FieldMetadata(alias="datasetItemId"), pydantic.Field(alias="datasetItemId") ] observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = None + typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field(alias="observationId", default=None), + ] trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="traceId") - ] = pydantic.Field(default=None) - """ - traceId should always be provided. For compatibility with older SDK versions it can also be inferred from the provided observationId. - """ - + typing.Optional[str], + FieldMetadata(alias="traceId"), + pydantic.Field( + alias="traceId", + default=None, + description="traceId should always be provided. For compatibility with older SDK versions it can also be inferred from the provided observationId.", + ), + ] dataset_version: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="datasetVersion") - ] = pydantic.Field(default=None) - """ - ISO 8601 timestamp (RFC 3339, Section 5.6) in UTC (e.g., "2026-01-21T14:35:42Z"). - Specifies the dataset version to use for this experiment run. - If provided, the experiment will use dataset items as they existed at or before this timestamp. - If not provided, uses the latest version of dataset items. 
- """ - + typing.Optional[dt.datetime], + FieldMetadata(alias="datasetVersion"), + pydantic.Field( + alias="datasetVersion", + default=None, + description='ISO 8601 timestamp (RFC 3339, Section 5.6) in UTC (e.g., "2026-01-21T14:35:42Z").\nSpecifies the dataset version to use for this experiment run. \nIf provided, the experiment will use dataset items as they existed at or before this timestamp.\nIf not provided, uses the latest version of dataset items.', + ), + ] created_at: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="createdAt") - ] = pydantic.Field(default=None) - """ - Optional timestamp to set the createdAt field of the dataset run item. If not provided or null, defaults to current timestamp. - """ + typing.Optional[dt.datetime], + FieldMetadata(alias="createdAt"), + pydantic.Field( + alias="createdAt", + default=None, + description="Optional timestamp to set the createdAt field of the dataset run item. If not provided or null, defaults to current timestamp.", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/datasets/raw_client.py b/langfuse/api/datasets/raw_client.py index 306ad8f76..3dc6c3ef6 100644 --- a/langfuse/api/datasets/raw_client.py +++ b/langfuse/api/datasets/raw_client.py @@ -14,11 +14,13 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.delete_dataset_run_response import DeleteDatasetRunResponse from .types.paginated_dataset_runs import PaginatedDatasetRuns from .types.paginated_datasets import PaginatedDatasets +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = 
typing.cast(typing.Any, ...) @@ -134,6 +136,13 @@ def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -237,6 +246,13 @@ def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -362,6 +378,13 @@ def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -468,6 +491,13 @@ def get_run( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -574,6 +604,13 @@ def delete_run( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -689,6 +726,13 @@ def get_runs( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( 
status_code=_response.status_code, headers=dict(_response.headers), @@ -806,6 +850,13 @@ async def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -909,6 +960,13 @@ async def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1034,6 +1092,13 @@ async def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1140,6 +1205,13 @@ async def get_run( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1246,6 +1318,13 @@ async def delete_run( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1361,6 +1440,13 @@ async def get_runs( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + 
headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/datasets/types/create_dataset_request.py b/langfuse/api/datasets/types/create_dataset_request.py index 9d9b01089..dc9b7ebec 100644 --- a/langfuse/api/datasets/types/create_dataset_request.py +++ b/langfuse/api/datasets/types/create_dataset_request.py @@ -13,18 +13,23 @@ class CreateDatasetRequest(UniversalBaseModel): description: typing.Optional[str] = None metadata: typing.Optional[typing.Any] = None input_schema: typing_extensions.Annotated[ - typing.Optional[typing.Any], FieldMetadata(alias="inputSchema") - ] = pydantic.Field(default=None) - """ - JSON Schema for validating dataset item inputs. When set, all new and existing dataset items will be validated against this schema. - """ - + typing.Optional[typing.Any], + FieldMetadata(alias="inputSchema"), + pydantic.Field( + alias="inputSchema", + default=None, + description="JSON Schema for validating dataset item inputs. When set, all new and existing dataset items will be validated against this schema.", + ), + ] expected_output_schema: typing_extensions.Annotated[ - typing.Optional[typing.Any], FieldMetadata(alias="expectedOutputSchema") - ] = pydantic.Field(default=None) - """ - JSON Schema for validating dataset item expected outputs. When set, all new and existing dataset items will be validated against this schema. - """ + typing.Optional[typing.Any], + FieldMetadata(alias="expectedOutputSchema"), + pydantic.Field( + alias="expectedOutputSchema", + default=None, + description="JSON Schema for validating dataset item expected outputs. 
When set, all new and existing dataset items will be validated against this schema.", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/health/raw_client.py b/langfuse/api/health/raw_client.py index afeef1a96..88d3a3b6e 100644 --- a/langfuse/api/health/raw_client.py +++ b/langfuse/api/health/raw_client.py @@ -11,10 +11,12 @@ from ..core.api_error import ApiError from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .errors.service_unavailable_error import ServiceUnavailableError from .types.health_response import HealthResponse +from pydantic import ValidationError class RawHealthClient: @@ -115,6 +117,13 @@ def health( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -220,6 +229,13 @@ async def health( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/ingestion/raw_client.py b/langfuse/api/ingestion/raw_client.py index cb60a5fba..b7fbe78b9 100644 --- a/langfuse/api/ingestion/raw_client.py +++ b/langfuse/api/ingestion/raw_client.py @@ -11,11 +11,13 @@ from ..core.api_error import ApiError from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, 
HttpResponse +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from ..core.serialization import convert_and_respect_annotation_metadata from .types.ingestion_event import IngestionEvent from .types.ingestion_response import IngestionResponse +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -150,6 +152,13 @@ def batch( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -286,6 +295,13 @@ async def batch( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/ingestion/types/create_generation_body.py b/langfuse/api/ingestion/types/create_generation_body.py index 72cb57116..85199d377 100644 --- a/langfuse/api/ingestion/types/create_generation_body.py +++ b/langfuse/api/ingestion/types/create_generation_body.py @@ -14,26 +14,37 @@ class CreateGenerationBody(CreateSpanBody): completion_start_time: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="completionStartTime") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="completionStartTime"), + pydantic.Field(alias="completionStartTime", default=None), + ] model: typing.Optional[str] = None model_parameters: typing_extensions.Annotated[ typing.Optional[typing.Dict[str, MapValue]], FieldMetadata(alias="modelParameters"), - ] = None + pydantic.Field(alias="modelParameters", 
default=None), + ] usage: typing.Optional[IngestionUsage] = None usage_details: typing_extensions.Annotated[ - typing.Optional[UsageDetails], FieldMetadata(alias="usageDetails") - ] = None + typing.Optional[UsageDetails], + FieldMetadata(alias="usageDetails"), + pydantic.Field(alias="usageDetails", default=None), + ] cost_details: typing_extensions.Annotated[ - typing.Optional[typing.Dict[str, float]], FieldMetadata(alias="costDetails") - ] = None + typing.Optional[typing.Dict[str, float]], + FieldMetadata(alias="costDetails"), + pydantic.Field(alias="costDetails", default=None), + ] prompt_name: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="promptName") - ] = None + typing.Optional[str], + FieldMetadata(alias="promptName"), + pydantic.Field(alias="promptName", default=None), + ] prompt_version: typing_extensions.Annotated[ - typing.Optional[int], FieldMetadata(alias="promptVersion") - ] = None + typing.Optional[int], + FieldMetadata(alias="promptVersion"), + pydantic.Field(alias="promptVersion", default=None), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/ingestion/types/create_span_body.py b/langfuse/api/ingestion/types/create_span_body.py index 7a47d9748..975f58093 100644 --- a/langfuse/api/ingestion/types/create_span_body.py +++ b/langfuse/api/ingestion/types/create_span_body.py @@ -11,8 +11,10 @@ class CreateSpanBody(CreateEventBody): end_time: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="endTime") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="endTime"), + pydantic.Field(alias="endTime", default=None), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/ingestion/types/observation_body.py b/langfuse/api/ingestion/types/observation_body.py index e989a768f..3db910c72 100644 --- 
a/langfuse/api/ingestion/types/observation_body.py +++ b/langfuse/api/ingestion/types/observation_body.py @@ -16,24 +16,33 @@ class ObservationBody(UniversalBaseModel): id: typing.Optional[str] = None trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="traceId") - ] = None + typing.Optional[str], + FieldMetadata(alias="traceId"), + pydantic.Field(alias="traceId", default=None), + ] type: ObservationType name: typing.Optional[str] = None start_time: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="startTime") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="startTime"), + pydantic.Field(alias="startTime", default=None), + ] end_time: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="endTime") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="endTime"), + pydantic.Field(alias="endTime", default=None), + ] completion_start_time: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="completionStartTime") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="completionStartTime"), + pydantic.Field(alias="completionStartTime", default=None), + ] model: typing.Optional[str] = None model_parameters: typing_extensions.Annotated[ typing.Optional[typing.Dict[str, MapValue]], FieldMetadata(alias="modelParameters"), - ] = None + pydantic.Field(alias="modelParameters", default=None), + ] input: typing.Optional[typing.Any] = None version: typing.Optional[str] = None metadata: typing.Optional[typing.Any] = None @@ -41,11 +50,15 @@ class ObservationBody(UniversalBaseModel): usage: typing.Optional[Usage] = None level: typing.Optional[ObservationLevel] = None status_message: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="statusMessage") - ] = None + typing.Optional[str], + FieldMetadata(alias="statusMessage"), + pydantic.Field(alias="statusMessage", default=None), + ] parent_observation_id: 
typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="parentObservationId") - ] = None + typing.Optional[str], + FieldMetadata(alias="parentObservationId"), + pydantic.Field(alias="parentObservationId", default=None), + ] environment: typing.Optional[str] = None model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/ingestion/types/open_ai_usage.py b/langfuse/api/ingestion/types/open_ai_usage.py index 7c1ab3160..47a05a893 100644 --- a/langfuse/api/ingestion/types/open_ai_usage.py +++ b/langfuse/api/ingestion/types/open_ai_usage.py @@ -14,14 +14,20 @@ class OpenAiUsage(UniversalBaseModel): """ prompt_tokens: typing_extensions.Annotated[ - typing.Optional[int], FieldMetadata(alias="promptTokens") - ] = None + typing.Optional[int], + FieldMetadata(alias="promptTokens"), + pydantic.Field(alias="promptTokens", default=None), + ] completion_tokens: typing_extensions.Annotated[ - typing.Optional[int], FieldMetadata(alias="completionTokens") - ] = None + typing.Optional[int], + FieldMetadata(alias="completionTokens"), + pydantic.Field(alias="completionTokens", default=None), + ] total_tokens: typing_extensions.Annotated[ - typing.Optional[int], FieldMetadata(alias="totalTokens") - ] = None + typing.Optional[int], + FieldMetadata(alias="totalTokens"), + pydantic.Field(alias="totalTokens", default=None), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/ingestion/types/optional_observation_body.py b/langfuse/api/ingestion/types/optional_observation_body.py index f2aaf9b6d..53675b714 100644 --- a/langfuse/api/ingestion/types/optional_observation_body.py +++ b/langfuse/api/ingestion/types/optional_observation_body.py @@ -12,22 +12,30 @@ class OptionalObservationBody(UniversalBaseModel): trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="traceId") - ] = None + typing.Optional[str], + 
FieldMetadata(alias="traceId"), + pydantic.Field(alias="traceId", default=None), + ] name: typing.Optional[str] = None start_time: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="startTime") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="startTime"), + pydantic.Field(alias="startTime", default=None), + ] metadata: typing.Optional[typing.Any] = None input: typing.Optional[typing.Any] = None output: typing.Optional[typing.Any] = None level: typing.Optional[ObservationLevel] = None status_message: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="statusMessage") - ] = None + typing.Optional[str], + FieldMetadata(alias="statusMessage"), + pydantic.Field(alias="statusMessage", default=None), + ] parent_observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="parentObservationId") - ] = None + typing.Optional[str], + FieldMetadata(alias="parentObservationId"), + pydantic.Field(alias="parentObservationId", default=None), + ] version: typing.Optional[str] = None environment: typing.Optional[str] = None diff --git a/langfuse/api/ingestion/types/score_body.py b/langfuse/api/ingestion/types/score_body.py index f559187b6..1f22ba0ae 100644 --- a/langfuse/api/ingestion/types/score_body.py +++ b/langfuse/api/ingestion/types/score_body.py @@ -25,17 +25,25 @@ class ScoreBody(UniversalBaseModel): id: typing.Optional[str] = None trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="traceId") - ] = None + typing.Optional[str], + FieldMetadata(alias="traceId"), + pydantic.Field(alias="traceId", default=None), + ] session_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sessionId") - ] = None + typing.Optional[str], + FieldMetadata(alias="sessionId"), + pydantic.Field(alias="sessionId", default=None), + ] observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = None 
+ typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field(alias="observationId", default=None), + ] dataset_run_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="datasetRunId") - ] = None + typing.Optional[str], + FieldMetadata(alias="datasetRunId"), + pydantic.Field(alias="datasetRunId", default=None), + ] name: str = pydantic.Field() """ The name of the score. Always overrides "output" for correction scores. @@ -43,12 +51,14 @@ class ScoreBody(UniversalBaseModel): environment: typing.Optional[str] = None queue_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="queueId") - ] = pydantic.Field(default=None) - """ - The annotation queue referenced by the score. Indicates if score was initially created while processing annotation queue. - """ - + typing.Optional[str], + FieldMetadata(alias="queueId"), + pydantic.Field( + alias="queueId", + default=None, + description="The annotation queue referenced by the score. Indicates if score was initially created while processing annotation queue.", + ), + ] value: CreateScoreValue = pydantic.Field() """ The value of the score. Must be passed as string for categorical scores, and numeric for boolean and numeric scores. Boolean score values must equal either 1 or 0 (true or false) @@ -57,18 +67,23 @@ class ScoreBody(UniversalBaseModel): comment: typing.Optional[str] = None metadata: typing.Optional[typing.Any] = None data_type: typing_extensions.Annotated[ - typing.Optional[ScoreDataType], FieldMetadata(alias="dataType") - ] = pydantic.Field(default=None) - """ - When set, must match the score value's type. If not set, will be inferred from the score value or config - """ - + typing.Optional[ScoreDataType], + FieldMetadata(alias="dataType"), + pydantic.Field( + alias="dataType", + default=None, + description="When set, must match the score value's type. 
If not set, will be inferred from the score value or config", + ), + ] config_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="configId") - ] = pydantic.Field(default=None) - """ - Reference a score config on a score. When set, the score name must equal the config name and scores must comply with the config's range and data type. For categorical scores, the value must map to a config category. Numeric scores might be constrained by the score config's max and min values - """ + typing.Optional[str], + FieldMetadata(alias="configId"), + pydantic.Field( + alias="configId", + default=None, + description="Reference a score config on a score. When set, the score name must equal the config name and scores must comply with the config's range and data type. For categorical scores, the value must map to a config category. Numeric scores might be constrained by the score config's max and min values", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/ingestion/types/trace_body.py b/langfuse/api/ingestion/types/trace_body.py index 7fb2842a0..d3a1b6a6f 100644 --- a/langfuse/api/ingestion/types/trace_body.py +++ b/langfuse/api/ingestion/types/trace_body.py @@ -14,13 +14,17 @@ class TraceBody(UniversalBaseModel): timestamp: typing.Optional[dt.datetime] = None name: typing.Optional[str] = None user_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="userId") - ] = None + typing.Optional[str], + FieldMetadata(alias="userId"), + pydantic.Field(alias="userId", default=None), + ] input: typing.Optional[typing.Any] = None output: typing.Optional[typing.Any] = None session_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sessionId") - ] = None + typing.Optional[str], + FieldMetadata(alias="sessionId"), + pydantic.Field(alias="sessionId", default=None), + ] release: typing.Optional[str] = None version: 
typing.Optional[str] = None metadata: typing.Optional[typing.Any] = None diff --git a/langfuse/api/ingestion/types/update_generation_body.py b/langfuse/api/ingestion/types/update_generation_body.py index 1d453e759..6892daf10 100644 --- a/langfuse/api/ingestion/types/update_generation_body.py +++ b/langfuse/api/ingestion/types/update_generation_body.py @@ -14,26 +14,37 @@ class UpdateGenerationBody(UpdateSpanBody): completion_start_time: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="completionStartTime") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="completionStartTime"), + pydantic.Field(alias="completionStartTime", default=None), + ] model: typing.Optional[str] = None model_parameters: typing_extensions.Annotated[ typing.Optional[typing.Dict[str, MapValue]], FieldMetadata(alias="modelParameters"), - ] = None + pydantic.Field(alias="modelParameters", default=None), + ] usage: typing.Optional[IngestionUsage] = None prompt_name: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="promptName") - ] = None + typing.Optional[str], + FieldMetadata(alias="promptName"), + pydantic.Field(alias="promptName", default=None), + ] usage_details: typing_extensions.Annotated[ - typing.Optional[UsageDetails], FieldMetadata(alias="usageDetails") - ] = None + typing.Optional[UsageDetails], + FieldMetadata(alias="usageDetails"), + pydantic.Field(alias="usageDetails", default=None), + ] cost_details: typing_extensions.Annotated[ - typing.Optional[typing.Dict[str, float]], FieldMetadata(alias="costDetails") - ] = None + typing.Optional[typing.Dict[str, float]], + FieldMetadata(alias="costDetails"), + pydantic.Field(alias="costDetails", default=None), + ] prompt_version: typing_extensions.Annotated[ - typing.Optional[int], FieldMetadata(alias="promptVersion") - ] = None + typing.Optional[int], + FieldMetadata(alias="promptVersion"), + pydantic.Field(alias="promptVersion", default=None), + ] model_config: 
typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/ingestion/types/update_span_body.py b/langfuse/api/ingestion/types/update_span_body.py index f094b7cdb..8a0b09fad 100644 --- a/langfuse/api/ingestion/types/update_span_body.py +++ b/langfuse/api/ingestion/types/update_span_body.py @@ -11,8 +11,10 @@ class UpdateSpanBody(UpdateEventBody): end_time: typing_extensions.Annotated[ - typing.Optional[dt.datetime], FieldMetadata(alias="endTime") - ] = None + typing.Optional[dt.datetime], + FieldMetadata(alias="endTime"), + pydantic.Field(alias="endTime", default=None), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/legacy/metrics_v1/raw_client.py b/langfuse/api/legacy/metrics_v1/raw_client.py index 61f03e541..f7b7e34b2 100644 --- a/langfuse/api/legacy/metrics_v1/raw_client.py +++ b/langfuse/api/legacy/metrics_v1/raw_client.py @@ -11,9 +11,11 @@ from ...core.api_error import ApiError from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ...core.http_response import AsyncHttpResponse, HttpResponse +from ...core.parse_error import ParsingError from ...core.pydantic_utilities import parse_obj_as from ...core.request_options import RequestOptions from .types.metrics_response import MetricsResponse +from pydantic import ValidationError class RawMetricsV1Client: @@ -162,6 +164,13 @@ def metrics( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -315,6 +324,13 @@ async def metrics( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + 
body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/legacy/observations_v1/raw_client.py b/langfuse/api/legacy/observations_v1/raw_client.py index 61ecf409d..ca79081b4 100644 --- a/langfuse/api/legacy/observations_v1/raw_client.py +++ b/langfuse/api/legacy/observations_v1/raw_client.py @@ -16,9 +16,11 @@ from ...core.datetime_utils import serialize_datetime from ...core.http_response import AsyncHttpResponse, HttpResponse from ...core.jsonable_encoder import jsonable_encoder +from ...core.parse_error import ParsingError from ...core.pydantic_utilities import parse_obj_as from ...core.request_options import RequestOptions from .types.observations_views import ObservationsViews +from pydantic import ValidationError class RawObservationsV1Client: @@ -123,6 +125,13 @@ def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -383,6 +392,13 @@ def get_many( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -492,6 +508,13 @@ async def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -752,6 +775,13 @@ async def get_many( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + 
raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/legacy/score_v1/raw_client.py b/langfuse/api/legacy/score_v1/raw_client.py index 9bcbe082d..1e2f2174a 100644 --- a/langfuse/api/legacy/score_v1/raw_client.py +++ b/langfuse/api/legacy/score_v1/raw_client.py @@ -14,10 +14,12 @@ from ...core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ...core.http_response import AsyncHttpResponse, HttpResponse from ...core.jsonable_encoder import jsonable_encoder +from ...core.parse_error import ParsingError from ...core.pydantic_utilities import parse_obj_as from ...core.request_options import RequestOptions from ...core.serialization import convert_and_respect_annotation_metadata from .types.create_score_response import CreateScoreResponse +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -183,6 +185,13 @@ def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -277,6 +286,13 @@ def delete( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -444,6 +460,13 @@ async def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -538,6 +561,13 @@ async def delete( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/legacy/score_v1/types/create_score_request.py b/langfuse/api/legacy/score_v1/types/create_score_request.py index d54333ac3..bb8400dfc 100644 --- a/langfuse/api/legacy/score_v1/types/create_score_request.py +++ b/langfuse/api/legacy/score_v1/types/create_score_request.py @@ -25,17 +25,25 @@ class CreateScoreRequest(UniversalBaseModel): id: typing.Optional[str] = None trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="traceId") - ] = None + typing.Optional[str], + FieldMetadata(alias="traceId"), + pydantic.Field(alias="traceId", default=None), + ] session_id: 
typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sessionId") - ] = None + typing.Optional[str], + FieldMetadata(alias="sessionId"), + pydantic.Field(alias="sessionId", default=None), + ] observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = None + typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field(alias="observationId", default=None), + ] dataset_run_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="datasetRunId") - ] = None + typing.Optional[str], + FieldMetadata(alias="datasetRunId"), + pydantic.Field(alias="datasetRunId", default=None), + ] name: str value: CreateScoreValue = pydantic.Field() """ @@ -50,25 +58,32 @@ class CreateScoreRequest(UniversalBaseModel): """ queue_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="queueId") - ] = pydantic.Field(default=None) - """ - The annotation queue referenced by the score. Indicates if score was initially created while processing annotation queue. - """ - + typing.Optional[str], + FieldMetadata(alias="queueId"), + pydantic.Field( + alias="queueId", + default=None, + description="The annotation queue referenced by the score. Indicates if score was initially created while processing annotation queue.", + ), + ] data_type: typing_extensions.Annotated[ - typing.Optional[ScoreDataType], FieldMetadata(alias="dataType") - ] = pydantic.Field(default=None) - """ - The data type of the score. When passing a configId this field is inferred. Otherwise, this field must be passed or will default to numeric. - """ - + typing.Optional[ScoreDataType], + FieldMetadata(alias="dataType"), + pydantic.Field( + alias="dataType", + default=None, + description="The data type of the score. When passing a configId this field is inferred. 
Otherwise, this field must be passed or will default to numeric.", + ), + ] config_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="configId") - ] = pydantic.Field(default=None) - """ - Reference a score config on a score. The unique langfuse identifier of a score config. When passing this field, the dataType and stringValue fields are automatically populated. - """ + typing.Optional[str], + FieldMetadata(alias="configId"), + pydantic.Field( + alias="configId", + default=None, + description="Reference a score config on a score. The unique langfuse identifier of a score config. When passing this field, the dataType and stringValue fields are automatically populated.", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/llm_connections/raw_client.py b/langfuse/api/llm_connections/raw_client.py index ef4f87425..ae063fec6 100644 --- a/langfuse/api/llm_connections/raw_client.py +++ b/langfuse/api/llm_connections/raw_client.py @@ -11,11 +11,13 @@ from ..core.api_error import ApiError from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.llm_adapter import LlmAdapter from .types.llm_connection import LlmConnection from .types.paginated_llm_connections import PaginatedLlmConnections +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -131,6 +133,13 @@ def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -274,6 +283,13 @@ def upsert( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -391,6 +407,13 @@ async def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -534,6 +557,13 @@ async def upsert( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/llm_connections/types/llm_connection.py b/langfuse/api/llm_connections/types/llm_connection.py index f74ff98c2..f5eb288a4 100644 --- a/langfuse/api/llm_connections/types/llm_connection.py +++ b/langfuse/api/llm_connections/types/llm_connection.py @@ -26,50 +26,54 @@ class LlmConnection(UniversalBaseModel): """ display_secret_key: typing_extensions.Annotated[ - str, FieldMetadata(alias="displaySecretKey") - ] = pydantic.Field() - """ - Masked version of the secret key for display purposes - """ - + str, + FieldMetadata(alias="displaySecretKey"), + pydantic.Field( + alias="displaySecretKey", + 
description="Masked version of the secret key for display purposes", + ), + ] base_url: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="baseURL") - ] = pydantic.Field(default=None) - """ - Custom base URL for the LLM API - """ - + typing.Optional[str], + FieldMetadata(alias="baseURL"), + pydantic.Field( + alias="baseURL", default=None, description="Custom base URL for the LLM API" + ), + ] custom_models: typing_extensions.Annotated[ - typing.List[str], FieldMetadata(alias="customModels") - ] = pydantic.Field() - """ - List of custom model names available for this connection - """ - + typing.List[str], + FieldMetadata(alias="customModels"), + pydantic.Field( + alias="customModels", + description="List of custom model names available for this connection", + ), + ] with_default_models: typing_extensions.Annotated[ - bool, FieldMetadata(alias="withDefaultModels") - ] = pydantic.Field() - """ - Whether to include default models for this adapter - """ - + bool, + FieldMetadata(alias="withDefaultModels"), + pydantic.Field( + alias="withDefaultModels", + description="Whether to include default models for this adapter", + ), + ] extra_header_keys: typing_extensions.Annotated[ - typing.List[str], FieldMetadata(alias="extraHeaderKeys") - ] = pydantic.Field() - """ - Keys of extra headers sent with requests (values excluded for security) - """ - + typing.List[str], + FieldMetadata(alias="extraHeaderKeys"), + pydantic.Field( + alias="extraHeaderKeys", + description="Keys of extra headers sent with requests (values excluded for security)", + ), + ] config: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) """ Adapter-specific configuration. Required for Bedrock (`{"region":"us-east-1"}`), optional for VertexAI (`{"location":"us-central1"}`), not used by other adapters. 
""" created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/llm_connections/types/upsert_llm_connection_request.py b/langfuse/api/llm_connections/types/upsert_llm_connection_request.py index 712362fa1..b48307088 100644 --- a/langfuse/api/llm_connections/types/upsert_llm_connection_request.py +++ b/langfuse/api/llm_connections/types/upsert_llm_connection_request.py @@ -24,41 +24,43 @@ class UpsertLlmConnectionRequest(UniversalBaseModel): The adapter used to interface with the LLM """ - secret_key: typing_extensions.Annotated[str, FieldMetadata(alias="secretKey")] = ( - pydantic.Field() - ) - """ - Secret key for the LLM API. 
- """ - + secret_key: typing_extensions.Annotated[ + str, + FieldMetadata(alias="secretKey"), + pydantic.Field(alias="secretKey", description="Secret key for the LLM API."), + ] base_url: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="baseURL") - ] = pydantic.Field(default=None) - """ - Custom base URL for the LLM API - """ - + typing.Optional[str], + FieldMetadata(alias="baseURL"), + pydantic.Field( + alias="baseURL", default=None, description="Custom base URL for the LLM API" + ), + ] custom_models: typing_extensions.Annotated[ - typing.Optional[typing.List[str]], FieldMetadata(alias="customModels") - ] = pydantic.Field(default=None) - """ - List of custom model names - """ - + typing.Optional[typing.List[str]], + FieldMetadata(alias="customModels"), + pydantic.Field( + alias="customModels", default=None, description="List of custom model names" + ), + ] with_default_models: typing_extensions.Annotated[ - typing.Optional[bool], FieldMetadata(alias="withDefaultModels") - ] = pydantic.Field(default=None) - """ - Whether to include default models. Default is true. - """ - + typing.Optional[bool], + FieldMetadata(alias="withDefaultModels"), + pydantic.Field( + alias="withDefaultModels", + default=None, + description="Whether to include default models. Default is true.", + ), + ] extra_headers: typing_extensions.Annotated[ - typing.Optional[typing.Dict[str, str]], FieldMetadata(alias="extraHeaders") - ] = pydantic.Field(default=None) - """ - Extra headers to send with requests - """ - + typing.Optional[typing.Dict[str, str]], + FieldMetadata(alias="extraHeaders"), + pydantic.Field( + alias="extraHeaders", + default=None, + description="Extra headers to send with requests", + ), + ] config: typing.Optional[typing.Dict[str, typing.Any]] = pydantic.Field(default=None) """ Adapter-specific configuration. Validation rules: - **Bedrock**: Required. Must be `{"region": ""}` (e.g., `{"region":"us-east-1"}`) - **VertexAI**: Optional. 
If provided, must be `{"location": ""}` (e.g., `{"location":"us-central1"}`) - **Other adapters**: Not supported. Omit this field or set to null. diff --git a/langfuse/api/media/raw_client.py b/langfuse/api/media/raw_client.py index 4cc619770..39d1031ae 100644 --- a/langfuse/api/media/raw_client.py +++ b/langfuse/api/media/raw_client.py @@ -13,11 +13,13 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.get_media_response import GetMediaResponse from .types.get_media_upload_url_response import GetMediaUploadUrlResponse from .types.media_content_type import MediaContentType +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -122,6 +124,13 @@ def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -242,6 +251,13 @@ def patch( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -374,6 +390,13 @@ def get_upload_url( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -480,6 +503,13 @@ async def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -600,6 +630,13 @@ async def patch( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -732,6 +769,13 @@ async def get_upload_url( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( 
status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/media/types/get_media_response.py b/langfuse/api/media/types/get_media_response.py index fc1f70329..233d1efd0 100644 --- a/langfuse/api/media/types/get_media_response.py +++ b/langfuse/api/media/types/get_media_response.py @@ -10,45 +10,49 @@ class GetMediaResponse(UniversalBaseModel): - media_id: typing_extensions.Annotated[str, FieldMetadata(alias="mediaId")] = ( - pydantic.Field() - ) - """ - The unique langfuse identifier of a media record - """ - + media_id: typing_extensions.Annotated[ + str, + FieldMetadata(alias="mediaId"), + pydantic.Field( + alias="mediaId", + description="The unique langfuse identifier of a media record", + ), + ] content_type: typing_extensions.Annotated[ - str, FieldMetadata(alias="contentType") - ] = pydantic.Field() - """ - The MIME type of the media record - """ - + str, + FieldMetadata(alias="contentType"), + pydantic.Field( + alias="contentType", description="The MIME type of the media record" + ), + ] content_length: typing_extensions.Annotated[ - int, FieldMetadata(alias="contentLength") - ] = pydantic.Field() - """ - The size of the media record in bytes - """ - + int, + FieldMetadata(alias="contentLength"), + pydantic.Field( + alias="contentLength", description="The size of the media record in bytes" + ), + ] uploaded_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="uploadedAt") - ] = pydantic.Field() - """ - The date and time when the media record was uploaded - """ - + dt.datetime, + FieldMetadata(alias="uploadedAt"), + pydantic.Field( + alias="uploadedAt", + description="The date and time when the media record was uploaded", + ), + ] url: str = pydantic.Field() """ The download URL of the media record """ - url_expiry: typing_extensions.Annotated[str, FieldMetadata(alias="urlExpiry")] = ( - pydantic.Field() - ) - """ - The expiry date and time of the media record download URL - """ + url_expiry: 
typing_extensions.Annotated[ + str, + FieldMetadata(alias="urlExpiry"), + pydantic.Field( + alias="urlExpiry", + description="The expiry date and time of the media record download URL", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/media/types/get_media_upload_url_request.py b/langfuse/api/media/types/get_media_upload_url_request.py index 99f055847..a75bc9c27 100644 --- a/langfuse/api/media/types/get_media_upload_url_request.py +++ b/langfuse/api/media/types/get_media_upload_url_request.py @@ -10,37 +10,41 @@ class GetMediaUploadUrlRequest(UniversalBaseModel): - trace_id: typing_extensions.Annotated[str, FieldMetadata(alias="traceId")] = ( - pydantic.Field() - ) - """ - The trace ID associated with the media record - """ - + trace_id: typing_extensions.Annotated[ + str, + FieldMetadata(alias="traceId"), + pydantic.Field( + alias="traceId", description="The trace ID associated with the media record" + ), + ] observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = pydantic.Field(default=None) - """ - The observation ID associated with the media record. If the media record is associated directly with a trace, this will be null. - """ - + typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field( + alias="observationId", + default=None, + description="The observation ID associated with the media record. 
If the media record is associated directly with a trace, this will be null.", + ), + ] content_type: typing_extensions.Annotated[ - MediaContentType, FieldMetadata(alias="contentType") + MediaContentType, + FieldMetadata(alias="contentType"), + pydantic.Field(alias="contentType"), ] content_length: typing_extensions.Annotated[ - int, FieldMetadata(alias="contentLength") - ] = pydantic.Field() - """ - The size of the media record in bytes - """ - - sha256hash: typing_extensions.Annotated[str, FieldMetadata(alias="sha256Hash")] = ( - pydantic.Field() - ) - """ - The SHA-256 hash of the media record - """ - + int, + FieldMetadata(alias="contentLength"), + pydantic.Field( + alias="contentLength", description="The size of the media record in bytes" + ), + ] + sha256hash: typing_extensions.Annotated[ + str, + FieldMetadata(alias="sha256Hash"), + pydantic.Field( + alias="sha256Hash", description="The SHA-256 hash of the media record" + ), + ] field: str = pydantic.Field() """ The trace / observation field the media record is associated with. This can be one of `input`, `output`, `metadata` diff --git a/langfuse/api/media/types/get_media_upload_url_response.py b/langfuse/api/media/types/get_media_upload_url_response.py index 90c735be3..66c34ce5b 100644 --- a/langfuse/api/media/types/get_media_upload_url_response.py +++ b/langfuse/api/media/types/get_media_upload_url_response.py @@ -10,18 +10,22 @@ class GetMediaUploadUrlResponse(UniversalBaseModel): upload_url: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="uploadUrl") - ] = pydantic.Field(default=None) - """ - The presigned upload URL. 
If the asset is already uploaded, this will be null - """ - - media_id: typing_extensions.Annotated[str, FieldMetadata(alias="mediaId")] = ( - pydantic.Field() - ) - """ - The unique langfuse identifier of a media record - """ + typing.Optional[str], + FieldMetadata(alias="uploadUrl"), + pydantic.Field( + alias="uploadUrl", + default=None, + description="The presigned upload URL. If the asset is already uploaded, this will be null", + ), + ] + media_id: typing_extensions.Annotated[ + str, + FieldMetadata(alias="mediaId"), + pydantic.Field( + alias="mediaId", + description="The unique langfuse identifier of a media record", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/media/types/patch_media_body.py b/langfuse/api/media/types/patch_media_body.py index e5f93f601..97c2c4739 100644 --- a/langfuse/api/media/types/patch_media_body.py +++ b/langfuse/api/media/types/patch_media_body.py @@ -11,32 +11,38 @@ class PatchMediaBody(UniversalBaseModel): uploaded_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="uploadedAt") - ] = pydantic.Field() - """ - The date and time when the media record was uploaded - """ - + dt.datetime, + FieldMetadata(alias="uploadedAt"), + pydantic.Field( + alias="uploadedAt", + description="The date and time when the media record was uploaded", + ), + ] upload_http_status: typing_extensions.Annotated[ - int, FieldMetadata(alias="uploadHttpStatus") - ] = pydantic.Field() - """ - The HTTP status code of the upload - """ - + int, + FieldMetadata(alias="uploadHttpStatus"), + pydantic.Field( + alias="uploadHttpStatus", description="The HTTP status code of the upload" + ), + ] upload_http_error: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="uploadHttpError") - ] = pydantic.Field(default=None) - """ - The HTTP error message of the upload - """ - + typing.Optional[str], + FieldMetadata(alias="uploadHttpError"), + 
pydantic.Field( + alias="uploadHttpError", + default=None, + description="The HTTP error message of the upload", + ), + ] upload_time_ms: typing_extensions.Annotated[ - typing.Optional[int], FieldMetadata(alias="uploadTimeMs") - ] = pydantic.Field(default=None) - """ - The time in milliseconds it took to upload the media record - """ + typing.Optional[int], + FieldMetadata(alias="uploadTimeMs"), + pydantic.Field( + alias="uploadTimeMs", + default=None, + description="The time in milliseconds it took to upload the media record", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/metrics/raw_client.py b/langfuse/api/metrics/raw_client.py index 69c976bcc..30d7b083b 100644 --- a/langfuse/api/metrics/raw_client.py +++ b/langfuse/api/metrics/raw_client.py @@ -11,9 +11,11 @@ from ..core.api_error import ApiError from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.metrics_v2response import MetricsV2Response +from pydantic import ValidationError class RawMetricsClient: @@ -266,6 +268,13 @@ def metrics( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -523,6 +532,13 @@ async def metrics( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git 
a/langfuse/api/models/raw_client.py b/langfuse/api/models/raw_client.py index 0fdc72319..018e786b1 100644 --- a/langfuse/api/models/raw_client.py +++ b/langfuse/api/models/raw_client.py @@ -16,10 +16,12 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from ..core.serialization import convert_and_respect_annotation_metadata from .types.paginated_models import PaginatedModels +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -197,6 +199,13 @@ def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -309,6 +318,13 @@ def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -409,6 +425,13 @@ def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -502,6 +525,13 @@ def delete( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + 
headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -681,6 +711,13 @@ async def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -793,6 +830,13 @@ async def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -893,6 +937,13 @@ async def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -986,6 +1037,13 @@ async def delete( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/models/types/create_model_request.py b/langfuse/api/models/types/create_model_request.py index dc19db944..4bf4bb25d 100644 --- a/langfuse/api/models/types/create_model_request.py +++ b/langfuse/api/models/types/create_model_request.py @@ -12,91 +12,90 @@ class CreateModelRequest(UniversalBaseModel): - model_name: typing_extensions.Annotated[str, FieldMetadata(alias="modelName")] = ( - pydantic.Field() - ) - """ - Name of the model 
definition. If multiple with the same name exist, they are applied in the following order: (1) custom over built-in, (2) newest according to startTime where model.startTimeclient.annotation_queues.list_queues(...) -> PaginatedAnnotationQueues +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all annotation queues +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.annotation_queues.list_queues() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — limit of items per page + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+ + +
client.annotation_queues.create_queue(...) -> AnnotationQueue +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create an annotation queue +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.annotation_queues.create_queue( + name="name", + score_config_ids=[ + "scoreConfigIds", + "scoreConfigIds" + ], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreateAnnotationQueueRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.annotation_queues.get_queue(...) -> AnnotationQueue +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get an annotation queue by ID +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.annotation_queues.get_queue( + queue_id="queueId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**queue_id:** `str` — The unique identifier of the annotation queue + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.annotation_queues.list_queue_items(...) -> PaginatedAnnotationQueueItems +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get items for a specific annotation queue +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.annotation_queues.list_queue_items( + queue_id="queueId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**queue_id:** `str` — The unique identifier of the annotation queue + +
+
+ +
+
+ +**status:** `typing.Optional[AnnotationQueueStatus]` — Filter by status + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — limit of items per page + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.annotation_queues.get_queue_item(...) -> AnnotationQueueItem +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a specific item from an annotation queue +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.annotation_queues.get_queue_item( + queue_id="queueId", + item_id="itemId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**queue_id:** `str` — The unique identifier of the annotation queue + +
+
+ +
+
+ +**item_id:** `str` — The unique identifier of the annotation queue item + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.annotation_queues.create_queue_item(...) -> AnnotationQueueItem +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Add an item to an annotation queue +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.annotation_queues.create_queue_item( + queue_id="queueId", + object_id="objectId", + object_type="TRACE", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**queue_id:** `str` — The unique identifier of the annotation queue + +
+
+ +
+
+ +**request:** `CreateAnnotationQueueItemRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.annotation_queues.update_queue_item(...) -> AnnotationQueueItem +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update an annotation queue item +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.annotation_queues.update_queue_item( + queue_id="queueId", + item_id="itemId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**queue_id:** `str` — The unique identifier of the annotation queue + +
+
+ +
+
+ +**item_id:** `str` — The unique identifier of the annotation queue item + +
+
+ +
+
+ +**request:** `UpdateAnnotationQueueItemRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.annotation_queues.delete_queue_item(...) -> DeleteAnnotationQueueItemResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Remove an item from an annotation queue +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.annotation_queues.delete_queue_item( + queue_id="queueId", + item_id="itemId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**queue_id:** `str` — The unique identifier of the annotation queue + +
+
+ +
+
+ +**item_id:** `str` — The unique identifier of the annotation queue item + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.annotation_queues.create_queue_assignment(...) -> CreateAnnotationQueueAssignmentResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create an assignment for a user to an annotation queue +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.annotation_queues.create_queue_assignment( + queue_id="queueId", + user_id="userId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**queue_id:** `str` — The unique identifier of the annotation queue + +
+
+ +
+
+ +**request:** `AnnotationQueueAssignmentRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.annotation_queues.delete_queue_assignment(...) -> DeleteAnnotationQueueAssignmentResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete an assignment for a user to an annotation queue +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.annotation_queues.delete_queue_assignment( + queue_id="queueId", + user_id="userId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**queue_id:** `str` — The unique identifier of the annotation queue + +
+
+ +
+
+ +**request:** `AnnotationQueueAssignmentRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## BlobStorageIntegrations +
client.blob_storage_integrations.get_blob_storage_integrations() -> BlobStorageIntegrationsResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all blob storage integrations for the organization (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.blob_storage_integrations.get_blob_storage_integrations() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.blob_storage_integrations.upsert_blob_storage_integration(...) -> BlobStorageIntegrationResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create or update a blob storage integration for a specific project (requires organization-scoped API key). The configuration is validated by performing a test upload to the bucket. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.blob_storage_integrations.upsert_blob_storage_integration( + project_id="projectId", + type="S3", + bucket_name="bucketName", + region="region", + export_frequency="hourly", + enabled=True, + force_path_style=True, + file_type="JSON", + export_mode="FULL_HISTORY", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreateBlobStorageIntegrationRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.blob_storage_integrations.get_blob_storage_integration_status(...) -> BlobStorageIntegrationStatusResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get the sync status of a blob storage integration by integration ID (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.blob_storage_integrations.get_blob_storage_integration_status( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.blob_storage_integrations.delete_blob_storage_integration(...) -> BlobStorageIntegrationDeletionResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a blob storage integration by ID (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.blob_storage_integrations.delete_blob_storage_integration( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Comments +
client.comments.create(...) -> CreateCommentResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a comment. Comments may be attached to different object types (trace, observation, session, prompt). +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.comments.create( + project_id="projectId", + object_type="objectType", + object_id="objectId", + content="content", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreateCommentRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.comments.get(...) -> GetCommentsResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all comments +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.comments.get() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — Page number, starts at 1. + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — Limit of items per page. If you encounter API issues due to too large page sizes, try to reduce the limit + +
+
+ +
+
+ +**object_type:** `typing.Optional[str]` — Filter comments by object type (trace, observation, session, prompt). + +
+
+ +
+
+ +**object_id:** `typing.Optional[str]` — Filter comments by object id. If objectType is not provided, an error will be thrown. + +
+
+ +
+
+ +**author_user_id:** `typing.Optional[str]` — Filter comments by author user id. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.comments.get_by_id(...) -> Comment +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a comment by id +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.comments.get_by_id( + comment_id="commentId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**comment_id:** `str` — The unique langfuse identifier of a comment + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## DatasetItems +
client.dataset_items.create(...) -> DatasetItem +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a dataset item +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.dataset_items.create( + dataset_name="datasetName", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreateDatasetItemRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.dataset_items.get(...) -> DatasetItem +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a dataset item +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.dataset_items.get( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.dataset_items.list(...) -> PaginatedDatasetItems +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get dataset items. Optionally specify a version to get the items as they existed at that point in time. +Note: If version parameter is provided, datasetName must also be provided. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.dataset_items.list() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**dataset_name:** `typing.Optional[str]` + +
+
+ +
+
+ +**source_trace_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**source_observation_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**version:** `typing.Optional[datetime.datetime]` + +ISO 8601 timestamp (RFC 3339, Section 5.6) in UTC (e.g., "2026-01-21T14:35:42Z"). +If provided, returns state of dataset at this timestamp. +If not provided, returns the latest version. Requires datasetName to be specified. + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — limit of items per page + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.dataset_items.delete(...) -> DeleteDatasetItemResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a dataset item and all its run items. This action is irreversible. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.dataset_items.delete( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## DatasetRunItems +
client.dataset_run_items.create(...) -> DatasetRunItem +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a dataset run item +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.dataset_run_items.create( + run_name="runName", + dataset_item_id="datasetItemId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreateDatasetRunItemRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.dataset_run_items.list(...) -> PaginatedDatasetRunItems +
+
+ +#### 📝 Description + +
+
+ +
+
+ +List dataset run items +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.dataset_run_items.list( + dataset_id="datasetId", + run_name="runName", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**dataset_id:** `str` + +
+
+ +
+
+ +**run_name:** `str` + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — limit of items per page + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Datasets +
client.datasets.list(...) -> PaginatedDatasets +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all datasets +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.datasets.list() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — limit of items per page + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.datasets.get(...) -> Dataset +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a dataset +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.datasets.get( + dataset_name="datasetName", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**dataset_name:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.datasets.create(...) -> Dataset +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a dataset +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.datasets.create( + name="name", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreateDatasetRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.datasets.get_run(...) -> DatasetRunWithItems +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a dataset run and its items +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.datasets.get_run( + dataset_name="datasetName", + run_name="runName", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**dataset_name:** `str` + +
+
+ +
+
+ +**run_name:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.datasets.delete_run(...) -> DeleteDatasetRunResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a dataset run and all its run items. This action is irreversible. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.datasets.delete_run( + dataset_name="datasetName", + run_name="runName", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**dataset_name:** `str` + +
+
+ +
+
+ +**run_name:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.datasets.get_runs(...) -> PaginatedDatasetRuns +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get dataset runs +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.datasets.get_runs( + dataset_name="datasetName", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**dataset_name:** `str` + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — limit of items per page + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Health +
client.health.health() -> HealthResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Check health of API and database +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.health.health() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Ingestion +
client.ingestion.batch(...) -> IngestionResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +**Legacy endpoint for batch ingestion for Langfuse Observability.** + +-> Please use the OpenTelemetry endpoint (`/api/public/otel/v1/traces`). Learn more: https://langfuse.com/integrations/native/opentelemetry + +Within each batch, there can be multiple events. +Each event has a type, an id, a timestamp, metadata and a body. +Internally, we refer to this as the "event envelope" as it tells us something about the event but not the trace. +We use the event id within this envelope to deduplicate messages to avoid processing the same event twice, i.e. the event id should be unique per request. +The event.body.id is the ID of the actual trace and will be used for updates and will be visible within the Langfuse App. +I.e. if you want to update a trace, you'd use the same body id, but separate event IDs. + +Notes: +- Introduction to data model: https://langfuse.com/docs/observability/data-model +- Batch sizes are limited to 3.5 MB in total. You need to adjust the number of events per batch accordingly. +- The API does not return a 4xx status code for input errors. Instead, it responds with a 207 status code, which includes a list of the encountered errors. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI +from langfuse.ingestion import TraceBody +import datetime + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.ingestion.batch( + batch=[ + { + "type": "trace-create", + "id": "abcdef-1234-5678-90ab", + "timestamp": "2022-01-01T00:00:00.000Z", + "body": TraceBody( + id="abcdef-1234-5678-90ab", + timestamp=datetime.datetime.fromisoformat("2022-01-01T00:00:00.000+00:00"), + environment="production", + name="My Trace", + user_id="1234-5678-90ab-cdef", + input="My input", + output="My output", + session_id="1234-5678-90ab-cdef", + release="1.0.0", + version="1.0.0", + metadata="My metadata", + tags=[ + "tag1", + "tag2" + ], + public=True, + ) + } + ], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**batch:** `typing.List[IngestionEvent]` — Batch of tracing events to be ingested. Discriminated by attribute `type`. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Any]` — Optional. Metadata field used by the Langfuse SDKs for debugging. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Legacy MetricsV1 +
client.legacy.metrics_v1.metrics(...) -> MetricsResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get metrics from the Langfuse project using a query object. + +Consider using the [v2 metrics endpoint](/api-reference#tag/metricsv2/GET/api/public/v2/metrics) for better performance. + +For more details, see the [Metrics API documentation](https://langfuse.com/docs/metrics/features/metrics-api). +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.legacy.metrics_v1.metrics( + query="query", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**query:** `str` + +JSON string containing the query parameters with the following structure: +```json +{ + "view": string, // Required. One of "traces", "observations", "scores-numeric", "scores-categorical" + "dimensions": [ // Optional. Default: [] + { + "field": string // Field to group by, e.g. "name", "userId", "sessionId" + } + ], + "metrics": [ // Required. At least one metric must be provided + { + "measure": string, // What to measure, e.g. "count", "latency", "value" + "aggregation": string // How to aggregate, e.g. "count", "sum", "avg", "p95", "histogram" + } + ], + "filters": [ // Optional. Default: [] + { + "column": string, // Column to filter on + "operator": string, // Operator, e.g. "=", ">", "<", "contains" + "value": any, // Value to compare against + "type": string, // Data type, e.g. "string", "number", "stringObject" + "key": string // Required only when filtering on metadata + } + ], + "timeDimension": { // Optional. Default: null. If provided, results will be grouped by time + "granularity": string // One of "minute", "hour", "day", "week", "month", "auto" + }, + "fromTimestamp": string, // Required. ISO datetime string for start of time range + "toTimestamp": string, // Required. ISO datetime string for end of time range + "orderBy": [ // Optional. Default: null + { + "field": string, // Field to order by + "direction": string // "asc" or "desc" + } + ], + "config": { // Optional. Query-specific configuration + "bins": number, // Optional. Number of bins for histogram (1-100), default: 10 + "row_limit": number // Optional. Row limit for results (1-1000) + } +} +``` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Legacy ObservationsV1 +
client.legacy.observations_v1.get(...) -> ObservationsView +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get an observation +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.legacy.observations_v1.get( + observation_id="observationId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**observation_id:** `str` — The unique langfuse identifier of an observation, can be an event, span or generation + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.legacy.observations_v1.get_many(...) -> ObservationsViews +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a list of observations. + +Consider using the [v2 observations endpoint](/api-reference#tag/observationsv2/GET/api/public/v2/observations) for cursor-based pagination and field selection. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.legacy.observations_v1.get_many() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — Page number, starts at 1. + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — Limit of items per page. If you encounter API issues due to too large page sizes, try to reduce the limit. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` + +
+
+ +
+
+ +**user_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**type:** `typing.Optional[str]` + +
+
+ +
+
+ +**trace_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**level:** `typing.Optional[ObservationLevel]` — Optional filter for observations with a specific level (e.g. "DEBUG", "DEFAULT", "WARNING", "ERROR"). + +
+
+ +
+
+ +**parent_observation_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**environment:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Optional filter for observations where the environment is one of the provided values. + +
+
+ +
+
+ +**from_start_time:** `typing.Optional[datetime.datetime]` — Retrieve only observations with a start_time on or after this datetime (ISO 8601). + +
+
+ +
+
+ +**to_start_time:** `typing.Optional[datetime.datetime]` — Retrieve only observations with a start_time before this datetime (ISO 8601). + +
+
+ +
+
+ +**version:** `typing.Optional[str]` — Optional filter to only include observations with a certain version. + +
+
+ +
+
+ +**filter:** `typing.Optional[str]` + +JSON string containing an array of filter conditions. When provided, this takes precedence over query parameter filters (userId, name, type, level, environment, fromStartTime, ...). + +## Filter Structure +Each filter condition has the following structure: +```json +[ + { + "type": string, // Required. One of: "datetime", "string", "number", "stringOptions", "categoryOptions", "arrayOptions", "stringObject", "numberObject", "boolean", "null" + "column": string, // Required. Column to filter on (see available columns below) + "operator": string, // Required. Operator based on type: + // - datetime: ">", "<", ">=", "<=" + // - string: "=", "contains", "does not contain", "starts with", "ends with" + // - stringOptions: "any of", "none of" + // - categoryOptions: "any of", "none of" + // - arrayOptions: "any of", "none of", "all of" + // - number: "=", ">", "<", ">=", "<=" + // - stringObject: "=", "contains", "does not contain", "starts with", "ends with" + // - numberObject: "=", ">", "<", ">=", "<=" + // - boolean: "=", "<>" + // - null: "is null", "is not null" + "value": any, // Required (except for null type). Value to compare against. 
Type depends on filter type + "key": string // Required only for stringObject, numberObject, and categoryOptions types when filtering on nested fields like metadata + } +] +``` + +## Available Columns + +### Core Observation Fields +- `id` (string) - Observation ID +- `type` (string) - Observation type (SPAN, GENERATION, EVENT) +- `name` (string) - Observation name +- `traceId` (string) - Associated trace ID +- `startTime` (datetime) - Observation start time +- `endTime` (datetime) - Observation end time +- `environment` (string) - Environment tag +- `level` (string) - Log level (DEBUG, DEFAULT, WARNING, ERROR) +- `statusMessage` (string) - Status message +- `version` (string) - Version tag + +### Performance Metrics +- `latency` (number) - Latency in seconds (calculated: end_time - start_time) +- `timeToFirstToken` (number) - Time to first token in seconds +- `tokensPerSecond` (number) - Output tokens per second + +### Token Usage +- `inputTokens` (number) - Number of input tokens +- `outputTokens` (number) - Number of output tokens +- `totalTokens` (number) - Total tokens (alias: `tokens`) + +### Cost Metrics +- `inputCost` (number) - Input cost in USD +- `outputCost` (number) - Output cost in USD +- `totalCost` (number) - Total cost in USD + +### Model Information +- `model` (string) - Provided model name +- `promptName` (string) - Associated prompt name +- `promptVersion` (number) - Associated prompt version + +### Structured Data +- `metadata` (stringObject/numberObject/categoryOptions) - Metadata key-value pairs. Use `key` parameter to filter on specific metadata keys. 
+ +### Associated Trace Fields (requires join with traces table) +- `userId` (string) - User ID from associated trace +- `traceName` (string) - Name from associated trace +- `traceEnvironment` (string) - Environment from associated trace +- `traceTags` (arrayOptions) - Tags from associated trace + +## Filter Examples +```json +[ + { + "type": "string", + "column": "type", + "operator": "=", + "value": "GENERATION" + }, + { + "type": "number", + "column": "latency", + "operator": ">=", + "value": 2.5 + }, + { + "type": "stringObject", + "column": "metadata", + "key": "environment", + "operator": "=", + "value": "production" + } +] +``` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Legacy ScoreV1 +
client.legacy.score_v1.create(...) -> CreateScoreResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a score (supports both trace and session scores) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.legacy.score_v1.create( + name="name", + value=1.1, +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreateScoreRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.legacy.score_v1.delete(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a score (supports both trace and session scores) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.legacy.score_v1.delete( + score_id="scoreId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**score_id:** `str` — The unique langfuse identifier of a score + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## LlmConnections +
client.llm_connections.list(...) -> PaginatedLlmConnections +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all LLM connections in a project +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.llm_connections.list() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — limit of items per page + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.llm_connections.upsert(...) -> LlmConnection +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create or update an LLM connection. The connection is upserted on provider. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.llm_connections.upsert( + provider="provider", + adapter="anthropic", + secret_key="secretKey", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `UpsertLlmConnectionRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Media +
client.media.get(...) -> GetMediaResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a media record +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.media.get( + media_id="mediaId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**media_id:** `str` — The unique langfuse identifier of a media record + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.media.patch(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Patch a media record +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI +import datetime + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.media.patch( + media_id="mediaId", + uploaded_at=datetime.datetime.fromisoformat("2024-01-15T09:30:00+00:00"), + upload_http_status=1, +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**media_id:** `str` — The unique langfuse identifier of a media record + +
+
+ +
+
+ +**request:** `PatchMediaBody` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.media.get_upload_url(...) -> GetMediaUploadUrlResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a presigned upload URL for a media record +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.media.get_upload_url( + trace_id="traceId", + content_type="image/png", + content_length=1, + sha256hash="sha256Hash", + field="field", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `GetMediaUploadUrlRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Metrics +
client.metrics.metrics(...) -> MetricsV2Response +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get metrics from the Langfuse project using a query object. V2 endpoint with optimized performance. + +## V2 Differences +- Supports `observations`, `scores-numeric`, and `scores-categorical` views only (traces view not supported) +- Direct access to tags and release fields on observations +- Backwards-compatible: traceName, traceRelease, traceVersion dimensions are still available on observations view +- High cardinality dimensions are not supported and will return a 400 error (see below) + +For more details, see the [Metrics API documentation](https://langfuse.com/docs/metrics/features/metrics-api). + +## Available Views + +### observations +Query observation-level data (spans, generations, events). + +**Dimensions:** +- `environment` - Deployment environment (e.g., production, staging) +- `type` - Type of observation (SPAN, GENERATION, EVENT) +- `name` - Name of the observation +- `level` - Logging level of the observation +- `version` - Version of the observation +- `tags` - User-defined tags +- `release` - Release version +- `traceName` - Name of the parent trace (backwards-compatible) +- `traceRelease` - Release version of the parent trace (backwards-compatible, maps to release) +- `traceVersion` - Version of the parent trace (backwards-compatible, maps to version) +- `providedModelName` - Name of the model used +- `promptName` - Name of the prompt used +- `promptVersion` - Version of the prompt used +- `startTimeMonth` - Month of start_time in YYYY-MM format + +**Measures:** +- `count` - Total number of observations +- `latency` - Observation latency (milliseconds) +- `streamingLatency` - Generation latency from completion start to end (milliseconds) +- `inputTokens` - Sum of input tokens consumed +- `outputTokens` - Sum of output tokens produced +- `totalTokens` - Sum of all tokens consumed +- `outputTokensPerSecond` - Output tokens per second +- `tokensPerSecond` - Total tokens per second +- `inputCost` - Input cost (USD) +- `outputCost` - Output cost 
(USD) +- `totalCost` - Total cost (USD) +- `timeToFirstToken` - Time to first token (milliseconds) +- `countScores` - Number of scores attached to the observation + +### scores-numeric +Query numeric and boolean score data. + +**Dimensions:** +- `environment` - Deployment environment +- `name` - Name of the score (e.g., accuracy, toxicity) +- `source` - Origin of the score (API, ANNOTATION, EVAL) +- `dataType` - Data type (NUMERIC, BOOLEAN) +- `configId` - Identifier of the score config +- `timestampMonth` - Month in YYYY-MM format +- `timestampDay` - Day in YYYY-MM-DD format +- `value` - Numeric value of the score +- `traceName` - Name of the parent trace +- `tags` - Tags +- `traceRelease` - Release version +- `traceVersion` - Version +- `observationName` - Name of the associated observation +- `observationModelName` - Model name of the associated observation +- `observationPromptName` - Prompt name of the associated observation +- `observationPromptVersion` - Prompt version of the associated observation + +**Measures:** +- `count` - Total number of scores +- `value` - Score value (for aggregations) + +### scores-categorical +Query categorical score data. Same dimensions as scores-numeric except uses `stringValue` instead of `value`. + +**Measures:** +- `count` - Total number of scores + +## High Cardinality Dimensions +The following dimensions cannot be used as grouping dimensions in v2 metrics API as they can cause performance issues. +Use them in filters instead. 
+ +**observations view:** +- `id` - Use traceId filter to narrow down results +- `traceId` - Use traceId filter instead +- `userId` - Use userId filter instead +- `sessionId` - Use sessionId filter instead +- `parentObservationId` - Use parentObservationId filter instead + +**scores-numeric / scores-categorical views:** +- `id` - Use specific filters to narrow down results +- `traceId` - Use traceId filter instead +- `userId` - Use userId filter instead +- `sessionId` - Use sessionId filter instead +- `observationId` - Use observationId filter instead + +## Aggregations +Available aggregation functions: `sum`, `avg`, `count`, `max`, `min`, `p50`, `p75`, `p90`, `p95`, `p99`, `histogram` + +## Time Granularities +Available granularities for timeDimension: `auto`, `minute`, `hour`, `day`, `week`, `month` +- `auto` bins the data into approximately 50 buckets based on the time range +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.metrics.metrics( + query="query", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**query:** `str` + +JSON string containing the query parameters with the following structure: +```json +{ + "view": string, // Required. One of "observations", "scores-numeric", "scores-categorical" + "dimensions": [ // Optional. Default: [] + { + "field": string // Field to group by (see available dimensions above) + } + ], + "metrics": [ // Required. At least one metric must be provided + { + "measure": string, // What to measure (see available measures above) + "aggregation": string // How to aggregate: "sum", "avg", "count", "max", "min", "p50", "p75", "p90", "p95", "p99", "histogram" + } + ], + "filters": [ // Optional. Default: [] + { + "column": string, // Column to filter on (any dimension field) + "operator": string, // Operator based on type: + // - datetime: ">", "<", ">=", "<=" + // - string: "=", "contains", "does not contain", "starts with", "ends with" + // - stringOptions: "any of", "none of" + // - arrayOptions: "any of", "none of", "all of" + // - number: "=", ">", "<", ">=", "<=" + // - stringObject/numberObject: same as string/number with required "key" + // - boolean: "=", "<>" + // - null: "is null", "is not null" + "value": any, // Value to compare against + "type": string, // Data type: "datetime", "string", "number", "stringOptions", "categoryOptions", "arrayOptions", "stringObject", "numberObject", "boolean", "null" + "key": string // Required only for stringObject/numberObject types (e.g., metadata filtering) + } + ], + "timeDimension": { // Optional. Default: null. If provided, results will be grouped by time + "granularity": string // One of "auto", "minute", "hour", "day", "week", "month" + }, + "fromTimestamp": string, // Required. ISO datetime string for start of time range + "toTimestamp": string, // Required. ISO datetime string for end of time range (must be after fromTimestamp) + "orderBy": [ // Optional. 
Default: null + { + "field": string, // Field to order by (dimension or metric alias) + "direction": string // "asc" or "desc" + } + ], + "config": { // Optional. Query-specific configuration + "bins": number, // Optional. Number of bins for histogram aggregation (1-100), default: 10 + "row_limit": number // Optional. Maximum number of rows to return (1-1000), default: 100 + } +} +``` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Models +
client.models.create(...) -> Model +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a model +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.models.create( + model_name="modelName", + match_pattern="matchPattern", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreateModelRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.models.list(...) -> PaginatedModels +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all models +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.models.list() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — limit of items per page + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.models.get(...) -> Model +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a model +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.models.get( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.models.delete(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a model. Models managed by Langfuse cannot be deleted; however, you can override such a managed definition by creating your own model definition with the same modelName. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.models.delete( + id="id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Observations +
client.observations.get_many(...) -> ObservationsV2Response +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a list of observations with cursor-based pagination and flexible field selection. + +## Cursor-based Pagination +This endpoint uses cursor-based pagination for efficient traversal of large datasets. +The cursor is returned in the response metadata and should be passed in subsequent requests +to retrieve the next page of results. + +## Field Selection +Use the `fields` parameter to control which observation fields are returned: +- `core` - Always included: id, traceId, startTime, endTime, projectId, parentObservationId, type +- `basic` - name, level, statusMessage, version, environment, bookmarked, public, userId, sessionId +- `time` - completionStartTime, createdAt, updatedAt +- `io` - input, output +- `metadata` - metadata (truncated to 200 chars by default, use `expandMetadata` to get full values) +- `model` - providedModelName, internalModelId, modelParameters +- `usage` - usageDetails, costDetails, totalCost +- `prompt` - promptId, promptName, promptVersion +- `metrics` - latency, timeToFirstToken + +If not specified, `core` and `basic` field groups are returned. + +## Filters +Multiple filtering options are available via query parameters or the structured `filter` parameter. +When using the `filter` parameter, it takes precedence over individual query parameter filters. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.observations.get_many() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**fields:** `typing.Optional[str]` + +Comma-separated list of field groups to include in the response. +Available groups: core, basic, time, io, metadata, model, usage, prompt, metrics. +If not specified, `core` and `basic` field groups are returned. +Example: "basic,usage,model" + +
+
+ +
+
+ +**expand_metadata:** `typing.Optional[str]` + +Comma-separated list of metadata keys to return non-truncated. +By default, metadata values over 200 characters are truncated. +Use this parameter to retrieve full values for specific keys. +Example: "key1,key2" + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — Number of items to return per page. Maximum 1000, default 50. + +
+
+ +
+
+ +**cursor:** `typing.Optional[str]` — Base64-encoded cursor for pagination. Use the cursor from the previous response to get the next page. + +
+
+ +
+
+ +**parse_io_as_json:** `typing.Optional[bool]` + +**Deprecated.** Setting this to `true` will return a 400 error. +Input/output fields are always returned as raw strings. +Remove this parameter or set it to `false`. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` + +
+
+ +
+
+ +**user_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**type:** `typing.Optional[str]` — Filter by observation type (e.g., "GENERATION", "SPAN", "EVENT", "AGENT", "TOOL", "CHAIN", "RETRIEVER", "EVALUATOR", "EMBEDDING", "GUARDRAIL") + +
+
+ +
+
+ +**trace_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**level:** `typing.Optional[ObservationLevel]` — Optional filter for observations with a specific level (e.g. "DEBUG", "DEFAULT", "WARNING", "ERROR"). + +
+
+ +
+
+ +**parent_observation_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**environment:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Optional filter for observations where the environment is one of the provided values. + +
+
+ +
+
+ +**from_start_time:** `typing.Optional[datetime.datetime]` — Retrieve only observations with a start_time on or after this datetime (ISO 8601). + +
+
+ +
+
+ +**to_start_time:** `typing.Optional[datetime.datetime]` — Retrieve only observations with a start_time before this datetime (ISO 8601). + +
+
+ +
+
+ +**version:** `typing.Optional[str]` — Optional filter to only include observations with a certain version. + +
+
+ +
+
+ +**filter:** `typing.Optional[str]` + +JSON string containing an array of filter conditions. When provided, this takes precedence over query parameter filters (userId, name, type, level, environment, fromStartTime, ...). + +## Filter Structure +Each filter condition has the following structure: +```json +[ + { + "type": string, // Required. One of: "datetime", "string", "number", "stringOptions", "categoryOptions", "arrayOptions", "stringObject", "numberObject", "boolean", "null" + "column": string, // Required. Column to filter on (see available columns below) + "operator": string, // Required. Operator based on type: + // - datetime: ">", "<", ">=", "<=" + // - string: "=", "contains", "does not contain", "starts with", "ends with" + // - stringOptions: "any of", "none of" + // - categoryOptions: "any of", "none of" + // - arrayOptions: "any of", "none of", "all of" + // - number: "=", ">", "<", ">=", "<=" + // - stringObject: "=", "contains", "does not contain", "starts with", "ends with" + // - numberObject: "=", ">", "<", ">=", "<=" + // - boolean: "=", "<>" + // - null: "is null", "is not null" + "value": any, // Required (except for null type). Value to compare against. 
Type depends on filter type + "key": string // Required only for stringObject, numberObject, and categoryOptions types when filtering on nested fields like metadata + } +] +``` + +## Available Columns + +### Core Observation Fields +- `id` (string) - Observation ID +- `type` (string) - Observation type (SPAN, GENERATION, EVENT) +- `name` (string) - Observation name +- `traceId` (string) - Associated trace ID +- `startTime` (datetime) - Observation start time +- `endTime` (datetime) - Observation end time +- `environment` (string) - Environment tag +- `level` (string) - Log level (DEBUG, DEFAULT, WARNING, ERROR) +- `statusMessage` (string) - Status message +- `version` (string) - Version tag +- `userId` (string) - User ID +- `sessionId` (string) - Session ID + +### Trace-Related Fields +- `traceName` (string) - Name of the parent trace +- `traceTags` (arrayOptions) - Tags from the parent trace +- `tags` (arrayOptions) - Alias for traceTags + +### Performance Metrics +- `latency` (number) - Latency in seconds (calculated: end_time - start_time) +- `timeToFirstToken` (number) - Time to first token in seconds +- `tokensPerSecond` (number) - Output tokens per second + +### Token Usage +- `inputTokens` (number) - Number of input tokens +- `outputTokens` (number) - Number of output tokens +- `totalTokens` (number) - Total tokens (alias: `tokens`) + +### Cost Metrics +- `inputCost` (number) - Input cost in USD +- `outputCost` (number) - Output cost in USD +- `totalCost` (number) - Total cost in USD + +### Model Information +- `model` (string) - Provided model name (alias: `providedModelName`) +- `promptName` (string) - Associated prompt name +- `promptVersion` (number) - Associated prompt version + +### Structured Data +- `metadata` (stringObject/numberObject/categoryOptions) - Metadata key-value pairs. Use `key` parameter to filter on specific metadata keys. 
+ +## Filter Examples +```json +[ + { + "type": "string", + "column": "type", + "operator": "=", + "value": "GENERATION" + }, + { + "type": "number", + "column": "latency", + "operator": ">=", + "value": 2.5 + }, + { + "type": "stringObject", + "column": "metadata", + "key": "environment", + "operator": "=", + "value": "production" + } +] +``` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Opentelemetry +
client.opentelemetry.export_traces(...) -> OtelTraceResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +**OpenTelemetry Traces Ingestion Endpoint** + +This endpoint implements the OTLP/HTTP specification for trace ingestion, providing native OpenTelemetry integration for Langfuse Observability. + +**Supported Formats:** +- Binary Protobuf: `Content-Type: application/x-protobuf` +- JSON Protobuf: `Content-Type: application/json` +- Supports gzip compression via `Content-Encoding: gzip` header + +**Specification Compliance:** +- Conforms to [OTLP/HTTP Trace Export](https://opentelemetry.io/docs/specs/otlp/#otlphttp) +- Implements `ExportTraceServiceRequest` message format + +**Documentation:** +- Integration guide: https://langfuse.com/integrations/native/opentelemetry +- Data model: https://langfuse.com/docs/observability/data-model +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI +from langfuse.opentelemetry import OtelResourceSpan, OtelResource, OtelAttribute, OtelAttributeValue, OtelScopeSpan, OtelScope, OtelSpan + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.opentelemetry.export_traces( + resource_spans=[ + OtelResourceSpan( + resource=OtelResource( + attributes=[ + OtelAttribute( + key="service.name", + value=OtelAttributeValue( + string_value="my-service", + ), + ), + OtelAttribute( + key="service.version", + value=OtelAttributeValue( + string_value="1.0.0", + ), + ) + ], + ), + scope_spans=[ + OtelScopeSpan( + scope=OtelScope( + name="langfuse-sdk", + version="2.60.3", + ), + spans=[ + OtelSpan( + trace_id="0123456789abcdef0123456789abcdef", + span_id="0123456789abcdef", + name="my-operation", + kind=1, + start_time_unix_nano="1747872000000000000", + end_time_unix_nano="1747872001000000000", + attributes=[ + OtelAttribute( + key="langfuse.observation.type", + value=OtelAttributeValue( + string_value="generation", + ), + ) + ], + status={}, + ) + ], + ) + ], + ) + ], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**resource_spans:** `typing.List[OtelResourceSpan]` — Array of resource spans containing trace data as defined in the OTLP specification + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Organizations +
client.organizations.get_organization_memberships() -> MembershipsResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all memberships for the organization associated with the API key (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.organizations.get_organization_memberships() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.organizations.update_organization_membership(...) -> MembershipResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create or update a membership for the organization associated with the API key (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.organizations.update_organization_membership( + user_id="userId", + role="OWNER", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `MembershipRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.organizations.delete_organization_membership(...) -> MembershipDeletionResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a membership from the organization associated with the API key (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.organizations.delete_organization_membership( + user_id="userId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `DeleteMembershipRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.organizations.get_project_memberships(...) -> MembershipsResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all memberships for a specific project (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.organizations.get_project_memberships( + project_id="projectId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.organizations.update_project_membership(...) -> MembershipResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create or update a membership for a specific project (requires organization-scoped API key). The user must already be a member of the organization. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.organizations.update_project_membership( + project_id="projectId", + user_id="userId", + role="OWNER", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` + +
+
+ +
+
+ +**request:** `MembershipRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.organizations.delete_project_membership(...) -> MembershipDeletionResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a membership from a specific project (requires organization-scoped API key). The user must be a member of the organization. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.organizations.delete_project_membership( + project_id="projectId", + user_id="userId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` + +
+
+ +
+
+ +**request:** `DeleteMembershipRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.organizations.get_organization_projects() -> OrganizationProjectsResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all projects for the organization associated with the API key (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.organizations.get_organization_projects() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.organizations.get_organization_api_keys() -> OrganizationApiKeysResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all API keys for the organization associated with the API key (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.organizations.get_organization_api_keys() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Projects +
client.projects.get() -> Projects +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get the project associated with the API key (requires a project-scoped API key). You can use GET /api/public/organizations/projects to get all projects with an organization-scoped key. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.projects.get() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.projects.create(...) -> Project +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a new project (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.projects.create( + name="name", + retention=1, +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**name:** `str` + +
+
+ +
+
+ +**retention:** `int` — Number of days to retain data. Must be 0 or at least 3 days. Requires data-retention entitlement for non-zero values. Optional. + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Any]]` — Optional metadata for the project + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.projects.update(...) -> Project +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update a project by ID (requires organization-scoped API key). +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.projects.update( + project_id="projectId", + name="name", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` + +
+
+ +
+
+ +**name:** `str` + +
+
+ +
+
+ +**metadata:** `typing.Optional[typing.Dict[str, typing.Any]]` — Optional metadata for the project + +
+
+ +
+
+ +**retention:** `typing.Optional[int]` + +Number of days to retain data. +Must be 0 or at least 3 days. +Requires data-retention entitlement for non-zero values. +Optional. Will retain existing retention setting if omitted. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.projects.delete(...) -> ProjectDeletionResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a project by ID (requires organization-scoped API key). Project deletion is processed asynchronously. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.projects.delete( + project_id="projectId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.projects.get_api_keys(...) -> ApiKeyList +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all API keys for a project (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.projects.get_api_keys( + project_id="projectId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.projects.create_api_key(...) -> ApiKeyResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a new API key for a project (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.projects.create_api_key( + project_id="projectId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` + +
+
+ +
+
+ +**note:** `typing.Optional[str]` — Optional note for the API key + +
+
+ +
+
+ +**public_key:** `typing.Optional[str]` — Optional predefined public key. Must start with 'pk-lf-'. If provided, secretKey must also be provided. + +
+
+ +
+
+ +**secret_key:** `typing.Optional[str]` — Optional predefined secret key. Must start with 'sk-lf-'. If provided, publicKey must also be provided. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.projects.delete_api_key(...) -> ApiKeyDeletionResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete an API key for a project (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.projects.delete_api_key( + project_id="projectId", + api_key_id="apiKeyId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**project_id:** `str` + +
+
+ +
+
+ +**api_key_id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## PromptVersion +
client.prompt_version.update(...) -> Prompt +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update labels for a specific prompt version +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.prompt_version.update( + name="name", + version=1, + new_labels=[ + "newLabels", + "newLabels" + ], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**name:** `str` + +The name of the prompt. If the prompt is in a folder (e.g., "folder/subfolder/prompt-name"), +the folder path must be URL encoded. + +
+
+ +
+
+ +**version:** `int` — Version of the prompt to update + +
+
+ +
+
+ +**new_labels:** `typing.List[str]` — New labels for the prompt version. Labels are unique across versions. The "latest" label is reserved and managed by Langfuse. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Prompts +
client.prompts.get(...) -> Prompt +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a prompt +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.prompts.get( + prompt_name="promptName", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**prompt_name:** `str` + +The name of the prompt. If the prompt is in a folder (e.g., "folder/subfolder/prompt-name"), +the folder path must be URL encoded. + +
+
+ +
+
+ +**version:** `typing.Optional[int]` — Version of the prompt to be retrieved. + +
+
+ +
+
+ +**label:** `typing.Optional[str]` — Label of the prompt to be retrieved. Defaults to "production" if no label or version is set. + +
+
+ +
+
+ +**resolve:** `typing.Optional[bool]` — Resolve prompt dependencies before returning the prompt. Defaults to `true`. Set to `false` to return the raw stored prompt with dependency tags intact. This bypasses prompt caching and is intended for debugging or one-off jobs, not production runtime fetches. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.prompts.list(...) -> PromptMetaListResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a list of prompt names with versions and labels +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.prompts.list() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**name:** `typing.Optional[str]` + +
+
+ +
+
+ +**label:** `typing.Optional[str]` + +
+
+ +
+
+ +**tag:** `typing.Optional[str]` + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — limit of items per page + +
+
+ +
+
+ +**from_updated_at:** `typing.Optional[datetime.datetime]` — Optional filter to only include prompt versions created/updated on or after a certain datetime (ISO 8601) + +
+
+ +
+
+ +**to_updated_at:** `typing.Optional[datetime.datetime]` — Optional filter to only include prompt versions created/updated before a certain datetime (ISO 8601) + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.prompts.create(...) -> Prompt +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a new version for the prompt with the given `name` +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI +from langfuse.prompts import CreateChatPromptRequest, ChatMessage + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.prompts.create( + request=CreateChatPromptRequest( + name="name", + prompt=[ + ChatMessage( + role="role", + content="content", + ), + ChatMessage( + role="role", + content="content", + ) + ], + type="chat", + ), +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreatePromptRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.prompts.delete(...) +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete prompt versions. If neither version nor label is specified, all versions of the prompt are deleted. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.prompts.delete( + prompt_name="promptName", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**prompt_name:** `str` — The name of the prompt + +
+
+ +
+
+ +**label:** `typing.Optional[str]` — Optional label to filter deletion. If specified, deletes all prompt versions that have this label. + +
+
+ +
+
+ +**version:** `typing.Optional[int]` — Optional version to filter deletion. If specified, deletes only this specific version of the prompt. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Scim +
client.scim.get_service_provider_config() -> ServiceProviderConfig +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get SCIM Service Provider Configuration (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.scim.get_service_provider_config() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.scim.get_resource_types() -> ResourceTypesResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get SCIM Resource Types (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.scim.get_resource_types() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.scim.get_schemas() -> SchemasResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get SCIM Schemas (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.scim.get_schemas() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.scim.list_users(...) -> ScimUsersListResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +List users in the organization (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.scim.list_users() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**filter:** `typing.Optional[str]` — Filter expression (e.g. userName eq "value") + +
+
+ +
+
+ +**start_index:** `typing.Optional[int]` — 1-based index of the first result to return (default 1) + +
+
+ +
+
+ +**count:** `typing.Optional[int]` — Maximum number of results to return (default 100) + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.scim.create_user(...) -> ScimUser +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a new user in the organization (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI +from langfuse.scim import ScimName + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.scim.create_user( + user_name="userName", + name=ScimName(), +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**user_name:** `str` — User's email address (required) + +
+
+ +
+
+ +**name:** `ScimName` — User's name information + +
+
+ +
+
+ +**emails:** `typing.Optional[typing.List[ScimEmail]]` — User's email addresses + +
+
+ +
+
+ +**active:** `typing.Optional[bool]` — Whether the user is active + +
+
+ +
+
+ +**password:** `typing.Optional[str]` — Initial password for the user + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.scim.get_user(...) -> ScimUser +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a specific user by ID (requires organization-scoped API key) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.scim.get_user( + user_id="userId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**user_id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.scim.delete_user(...) -> EmptyResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Remove a user from the organization (requires organization-scoped API key). Note that this only removes the user from the organization but does not delete the user entity itself. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.scim.delete_user( + user_id="userId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**user_id:** `str` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## ScoreConfigs +
client.score_configs.create(...) -> ScoreConfig +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Create a score configuration (config). Score configs are used to define the structure of scores. +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.score_configs.create( + name="name", + data_type="NUMERIC", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `CreateScoreConfigRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.score_configs.get(...) -> ScoreConfigs +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get all score configs +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.score_configs.get() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — Page number, starts at 1. + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — Limit of items per page. If you encounter api issues due to too large page sizes, try to reduce the limit + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.score_configs.get_by_id(...) -> ScoreConfig +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a score config +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.score_configs.get_by_id( + config_id="configId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**config_id:** `str` — The unique langfuse identifier of a score config + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.score_configs.update(...) -> ScoreConfig +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Update a score config +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.score_configs.update( + config_id="configId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**config_id:** `str` — The unique langfuse identifier of a score config + +
+
+ +
+
+ +**request:** `UpdateScoreConfigRequest` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Scores +
client.scores.get_many(...) -> GetScoresResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a list of scores (supports both trace and session scores) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.scores.get_many() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — Page number, starts at 1. + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — Limit of items per page. If you encounter api issues due to too large page sizes, try to reduce the limit. + +
+
+ +
+
+ +**user_id:** `typing.Optional[str]` — Retrieve only scores with this userId associated to the trace. + +
+
+ +
+
+ +**name:** `typing.Optional[str]` — Retrieve only scores with this name. + +
+
+ +
+
+ +**from_timestamp:** `typing.Optional[datetime.datetime]` — Optional filter to only include scores created on or after a certain datetime (ISO 8601) + +
+
+ +
+
+ +**to_timestamp:** `typing.Optional[datetime.datetime]` — Optional filter to only include scores created before a certain datetime (ISO 8601) + +
+
+ +
+
+ +**environment:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Optional filter for scores where the environment is one of the provided values. + +
+
+ +
+
+ +**source:** `typing.Optional[ScoreSource]` — Retrieve only scores from a specific source. + +
+
+ +
+
+ +**operator:** `typing.Optional[str]` — Operator used together with `value` to filter scores by their value. + +
+
+ +
+
+ +**value:** `typing.Optional[float]` — Value used together with `operator` to filter scores by their value. + +
+
+ +
+
+ +**score_ids:** `typing.Optional[str]` — Comma-separated list of score IDs to limit the results to. + +
+
+ +
+
+ +**config_id:** `typing.Optional[str]` — Retrieve only scores with a specific configId. + +
+
+ +
+
+ +**session_id:** `typing.Optional[str]` — Retrieve only scores with a specific sessionId. + +
+
+ +
+
+ +**dataset_run_id:** `typing.Optional[str]` — Retrieve only scores with a specific datasetRunId. + +
+
+ +
+
+ +**trace_id:** `typing.Optional[str]` — Retrieve only scores with a specific traceId. + +
+
+ +
+
+ +**observation_id:** `typing.Optional[str]` — Comma-separated list of observation IDs to filter scores by. + +
+
+ +
+
+ +**queue_id:** `typing.Optional[str]` — Retrieve only scores with a specific annotation queueId. + +
+
+ +
+
+ +**data_type:** `typing.Optional[ScoreDataType]` — Retrieve only scores with a specific dataType. + +
+
+ +
+
+ +**trace_tags:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Only scores linked to traces that include all of these tags will be returned. + +
+
+ +
+
+ +**fields:** `typing.Optional[str]` — Comma-separated list of field groups to include in the response. Available field groups: 'score' (core score fields), 'trace' (trace properties: userId, tags, environment, sessionId). If not specified, both 'score' and 'trace' are returned by default. Example: 'score' to exclude trace data, 'score,trace' to include both. Note: When filtering by trace properties (using userId or traceTags parameters), the 'trace' field group must be included, otherwise a 400 error will be returned. + +
+
+ +
+
+ +**filter:** `typing.Optional[str]` — A JSON stringified array of filter objects. Each object requires type, column, operator, and value. Supports filtering by score metadata using the stringObject type. Example: [{"type":"stringObject","column":"metadata","key":"user_id","operator":"=","value":"abc123"}]. Supported types: stringObject (metadata key-value filtering), string, number, datetime, stringOptions, arrayOptions. Supported operators for stringObject: =, contains, does not contain, starts with, ends with. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.scores.get_by_id(...) -> Score +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a score (supports both trace and session scores) +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.scores.get_by_id( + score_id="scoreId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**score_id:** `str` — The unique langfuse identifier of a score + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Sessions +
client.sessions.list(...) -> PaginatedSessions +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get sessions +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.sessions.list() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — Page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — Limit of items per page. If you encounter api issues due to too large page sizes, try to reduce the limit. + +
+
+ +
+
+ +**from_timestamp:** `typing.Optional[datetime.datetime]` — Optional filter to only include sessions created on or after a certain datetime (ISO 8601) + +
+
+ +
+
+ +**to_timestamp:** `typing.Optional[datetime.datetime]` — Optional filter to only include sessions created before a certain datetime (ISO 8601) + +
+
+ +
+
+ +**environment:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Optional filter for sessions where the environment is one of the provided values. + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.sessions.get(...) -> SessionWithTraces +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a session. Please note that `traces` on this endpoint are not paginated, if you plan to fetch large sessions, consider `GET /api/public/traces?sessionId=` +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.sessions.get( + session_id="sessionId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**session_id:** `str` — The unique id of a session + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +## Trace +
client.trace.get(...) -> TraceWithFullDetails +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get a specific trace +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.trace.get( + trace_id="traceId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**trace_id:** `str` — The unique langfuse identifier of a trace + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.trace.delete(...) -> DeleteTraceResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete a specific trace +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.trace.delete( + trace_id="traceId", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**trace_id:** `str` — The unique langfuse identifier of the trace to delete + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.trace.list(...) -> Traces +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Get list of traces +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.trace.list() + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**page:** `typing.Optional[int]` — Page number, starts at 1 + +
+
+ +
+
+ +**limit:** `typing.Optional[int]` — Limit of items per page. If you encounter api issues due to too large page sizes, try to reduce the limit. + +
+
+ +
+
+ +**user_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**name:** `typing.Optional[str]` + +
+
+ +
+
+ +**session_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**from_timestamp:** `typing.Optional[datetime.datetime]` — Optional filter to only include traces with a trace.timestamp on or after a certain datetime (ISO 8601) + +
+
+ +
+
+ +**to_timestamp:** `typing.Optional[datetime.datetime]` — Optional filter to only include traces with a trace.timestamp before a certain datetime (ISO 8601) + +
+
+ +
+
+ +**order_by:** `typing.Optional[str]` — Format of the string [field].[asc/desc]. Fields: id, timestamp, name, userId, release, version, public, bookmarked, sessionId. Example: timestamp.asc + +
+
+ +
+
+ +**tags:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Only traces that include all of these tags will be returned. + +
+
+ +
+
+ +**version:** `typing.Optional[str]` — Optional filter to only include traces with a certain version. + +
+
+ +
+
+ +**release:** `typing.Optional[str]` — Optional filter to only include traces with a certain release. + +
+
+ +
+
+ +**environment:** `typing.Optional[typing.Union[str, typing.Sequence[str]]]` — Optional filter for traces where the environment is one of the provided values. + +
+
+ +
+
+ +**fields:** `typing.Optional[str]` — Comma-separated list of fields to include in the response. Available field groups: 'core' (always included), 'io' (input, output, metadata), 'scores', 'observations', 'metrics'. If not specified, all fields are returned. Example: 'core,scores,metrics'. Note: Excluded 'observations' or 'scores' fields return empty arrays; excluded 'metrics' returns -1 for 'totalCost' and 'latency'. + +
+
+ +
+
+ +**filter:** `typing.Optional[str]` + +JSON string containing an array of filter conditions. When provided, this takes precedence over query parameter filters (userId, name, sessionId, tags, version, release, environment, fromTimestamp, toTimestamp). + +## Filter Structure +Each filter condition has the following structure: +```json +[ + { + "type": string, // Required. One of: "datetime", "string", "number", "stringOptions", "categoryOptions", "arrayOptions", "stringObject", "numberObject", "boolean", "null" + "column": string, // Required. Column to filter on (see available columns below) + "operator": string, // Required. Operator based on type: + // - datetime: ">", "<", ">=", "<=" + // - string: "=", "contains", "does not contain", "starts with", "ends with" + // - stringOptions: "any of", "none of" + // - categoryOptions: "any of", "none of" + // - arrayOptions: "any of", "none of", "all of" + // - number: "=", ">", "<", ">=", "<=" + // - stringObject: "=", "contains", "does not contain", "starts with", "ends with" + // - numberObject: "=", ">", "<", ">=", "<=" + // - boolean: "=", "<>" + // - null: "is null", "is not null" + "value": any, // Required (except for null type). Value to compare against. Type depends on filter type + "key": string // Required only for stringObject, numberObject, and categoryOptions types when filtering on nested fields like metadata + } +] +``` + +## Available Columns + +### Core Trace Fields +- `id` (string) - Trace ID +- `name` (string) - Trace name +- `timestamp` (datetime) - Trace timestamp +- `userId` (string) - User ID +- `sessionId` (string) - Session ID +- `environment` (string) - Environment tag +- `version` (string) - Version tag +- `release` (string) - Release tag +- `tags` (arrayOptions) - Array of tags +- `bookmarked` (boolean) - Bookmark status + +### Structured Data +- `metadata` (stringObject/numberObject/categoryOptions) - Metadata key-value pairs. Use `key` parameter to filter on specific metadata keys. 
+ +### Aggregated Metrics (from observations) +These metrics are aggregated from all observations within the trace: +- `latency` (number) - Latency in seconds (time from first observation start to last observation end) +- `inputTokens` (number) - Total input tokens across all observations +- `outputTokens` (number) - Total output tokens across all observations +- `totalTokens` (number) - Total tokens (alias: `tokens`) +- `inputCost` (number) - Total input cost in USD +- `outputCost` (number) - Total output cost in USD +- `totalCost` (number) - Total cost in USD + +### Observation Level Aggregations +These fields aggregate observation levels within the trace: +- `level` (string) - Highest severity level (ERROR > WARNING > DEFAULT > DEBUG) +- `warningCount` (number) - Count of WARNING level observations +- `errorCount` (number) - Count of ERROR level observations +- `defaultCount` (number) - Count of DEFAULT level observations +- `debugCount` (number) - Count of DEBUG level observations + +### Scores (requires join with scores table) +- `scores_avg` (number) - Average of numeric scores (alias: `scores`) +- `score_categories` (categoryOptions) - Categorical score values + +## Filter Examples +```json +[ + { + "type": "datetime", + "column": "timestamp", + "operator": ">=", + "value": "2024-01-01T00:00:00Z" + }, + { + "type": "string", + "column": "userId", + "operator": "=", + "value": "user-123" + }, + { + "type": "number", + "column": "totalCost", + "operator": ">=", + "value": 0.01 + }, + { + "type": "arrayOptions", + "column": "tags", + "operator": "all of", + "value": ["production", "critical"] + }, + { + "type": "stringObject", + "column": "metadata", + "key": "customer_tier", + "operator": "=", + "value": "enterprise" + } +] +``` + +## Performance Notes +- Filtering on `userId`, `sessionId`, or `metadata` may enable skip indexes for better query performance +- Score filters require a join with the scores table and may impact query performance + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ +
client.trace.delete_multiple(...) -> DeleteTraceResponse +
+
+ +#### 📝 Description + +
+
+ +
+
+ +Delete multiple traces +
+
+
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from langfuse import LangfuseAPI + +client = LangfuseAPI( + username="", + password="", + base_url="https://yourhost.com/path/to/api", +) + +client.trace.delete_multiple( + trace_ids=[ + "traceIds", + "traceIds" + ], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**trace_ids:** `typing.List[str]` — List of trace IDs to delete + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ + +
+
+
+ diff --git a/langfuse/api/scim/raw_client.py b/langfuse/api/scim/raw_client.py index e65f46592..862cd4f62 100644 --- a/langfuse/api/scim/raw_client.py +++ b/langfuse/api/scim/raw_client.py @@ -12,6 +12,7 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from ..core.serialization import convert_and_respect_annotation_metadata @@ -23,6 +24,7 @@ from .types.scim_user import ScimUser from .types.scim_users_list_response import ScimUsersListResponse from .types.service_provider_config import ServiceProviderConfig +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -124,6 +126,13 @@ def get_service_provider_config( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -222,6 +231,13 @@ def get_resource_types( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -320,6 +336,13 @@ def get_schemas( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ 
-437,6 +460,13 @@ def list_users( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -571,6 +601,13 @@ def create_user( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -671,6 +708,13 @@ def get_user( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -771,6 +815,13 @@ def delete_user( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -874,6 +925,13 @@ async def get_service_provider_config( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -972,6 +1030,13 @@ async def get_resource_types( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, 
+ ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1070,6 +1135,13 @@ async def get_schemas( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1187,6 +1259,13 @@ async def list_users( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1321,6 +1400,13 @@ async def create_user( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1421,6 +1507,13 @@ async def get_user( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1521,6 +1614,13 @@ async def delete_user( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/scim/types/authentication_scheme.py b/langfuse/api/scim/types/authentication_scheme.py index fc1fceb14..49a733a27 100644 --- 
a/langfuse/api/scim/types/authentication_scheme.py +++ b/langfuse/api/scim/types/authentication_scheme.py @@ -11,7 +11,9 @@ class AuthenticationScheme(UniversalBaseModel): name: str description: str - spec_uri: typing_extensions.Annotated[str, FieldMetadata(alias="specUri")] + spec_uri: typing_extensions.Annotated[ + str, FieldMetadata(alias="specUri"), pydantic.Field(alias="specUri") + ] type: str primary: bool diff --git a/langfuse/api/scim/types/bulk_config.py b/langfuse/api/scim/types/bulk_config.py index 4a3ae719f..925514997 100644 --- a/langfuse/api/scim/types/bulk_config.py +++ b/langfuse/api/scim/types/bulk_config.py @@ -11,10 +11,12 @@ class BulkConfig(UniversalBaseModel): supported: bool max_operations: typing_extensions.Annotated[ - int, FieldMetadata(alias="maxOperations") + int, FieldMetadata(alias="maxOperations"), pydantic.Field(alias="maxOperations") ] max_payload_size: typing_extensions.Annotated[ - int, FieldMetadata(alias="maxPayloadSize") + int, + FieldMetadata(alias="maxPayloadSize"), + pydantic.Field(alias="maxPayloadSize"), ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/scim/types/filter_config.py b/langfuse/api/scim/types/filter_config.py index ba9986e56..ae8971e3f 100644 --- a/langfuse/api/scim/types/filter_config.py +++ b/langfuse/api/scim/types/filter_config.py @@ -10,7 +10,9 @@ class FilterConfig(UniversalBaseModel): supported: bool - max_results: typing_extensions.Annotated[int, FieldMetadata(alias="maxResults")] + max_results: typing_extensions.Annotated[ + int, FieldMetadata(alias="maxResults"), pydantic.Field(alias="maxResults") + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/scim/types/resource_meta.py b/langfuse/api/scim/types/resource_meta.py index 99be2a96e..8d4bba657 100644 --- a/langfuse/api/scim/types/resource_meta.py +++ b/langfuse/api/scim/types/resource_meta.py @@ -9,7 +9,9 @@ class 
ResourceMeta(UniversalBaseModel): - resource_type: typing_extensions.Annotated[str, FieldMetadata(alias="resourceType")] + resource_type: typing_extensions.Annotated[ + str, FieldMetadata(alias="resourceType"), pydantic.Field(alias="resourceType") + ] location: str model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/scim/types/resource_type.py b/langfuse/api/scim/types/resource_type.py index 9913c465d..868aa4d5f 100644 --- a/langfuse/api/scim/types/resource_type.py +++ b/langfuse/api/scim/types/resource_type.py @@ -16,9 +16,13 @@ class ResourceType(UniversalBaseModel): name: str endpoint: str description: str - schema_: typing_extensions.Annotated[str, FieldMetadata(alias="schema")] + schema_: typing_extensions.Annotated[ + str, FieldMetadata(alias="schema"), pydantic.Field(alias="schema") + ] schema_extensions: typing_extensions.Annotated[ - typing.List[SchemaExtension], FieldMetadata(alias="schemaExtensions") + typing.List[SchemaExtension], + FieldMetadata(alias="schemaExtensions"), + pydantic.Field(alias="schemaExtensions"), ] meta: ResourceMeta diff --git a/langfuse/api/scim/types/resource_types_response.py b/langfuse/api/scim/types/resource_types_response.py index 8ff1c47d9..5b8c973d0 100644 --- a/langfuse/api/scim/types/resource_types_response.py +++ b/langfuse/api/scim/types/resource_types_response.py @@ -11,9 +11,13 @@ class ResourceTypesResponse(UniversalBaseModel): schemas: typing.List[str] - total_results: typing_extensions.Annotated[int, FieldMetadata(alias="totalResults")] + total_results: typing_extensions.Annotated[ + int, FieldMetadata(alias="totalResults"), pydantic.Field(alias="totalResults") + ] resources: typing_extensions.Annotated[ - typing.List[ResourceType], FieldMetadata(alias="Resources") + typing.List[ResourceType], + FieldMetadata(alias="Resources"), + pydantic.Field(alias="Resources"), ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git 
a/langfuse/api/scim/types/schema_extension.py b/langfuse/api/scim/types/schema_extension.py index 4a09d6192..b78dee715 100644 --- a/langfuse/api/scim/types/schema_extension.py +++ b/langfuse/api/scim/types/schema_extension.py @@ -9,7 +9,9 @@ class SchemaExtension(UniversalBaseModel): - schema_: typing_extensions.Annotated[str, FieldMetadata(alias="schema")] + schema_: typing_extensions.Annotated[ + str, FieldMetadata(alias="schema"), pydantic.Field(alias="schema") + ] required: bool model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/scim/types/schemas_response.py b/langfuse/api/scim/types/schemas_response.py index 3162f9431..9533f5663 100644 --- a/langfuse/api/scim/types/schemas_response.py +++ b/langfuse/api/scim/types/schemas_response.py @@ -11,9 +11,13 @@ class SchemasResponse(UniversalBaseModel): schemas: typing.List[str] - total_results: typing_extensions.Annotated[int, FieldMetadata(alias="totalResults")] + total_results: typing_extensions.Annotated[ + int, FieldMetadata(alias="totalResults"), pydantic.Field(alias="totalResults") + ] resources: typing_extensions.Annotated[ - typing.List[SchemaResource], FieldMetadata(alias="Resources") + typing.List[SchemaResource], + FieldMetadata(alias="Resources"), + pydantic.Field(alias="Resources"), ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/scim/types/scim_user.py b/langfuse/api/scim/types/scim_user.py index 22d6d50fe..cf12da131 100644 --- a/langfuse/api/scim/types/scim_user.py +++ b/langfuse/api/scim/types/scim_user.py @@ -14,7 +14,9 @@ class ScimUser(UniversalBaseModel): schemas: typing.List[str] id: str - user_name: typing_extensions.Annotated[str, FieldMetadata(alias="userName")] + user_name: typing_extensions.Annotated[ + str, FieldMetadata(alias="userName"), pydantic.Field(alias="userName") + ] name: ScimName emails: typing.List[ScimEmail] meta: UserMeta diff --git 
a/langfuse/api/scim/types/scim_users_list_response.py b/langfuse/api/scim/types/scim_users_list_response.py index bcfba30bb..9e4908356 100644 --- a/langfuse/api/scim/types/scim_users_list_response.py +++ b/langfuse/api/scim/types/scim_users_list_response.py @@ -11,13 +11,19 @@ class ScimUsersListResponse(UniversalBaseModel): schemas: typing.List[str] - total_results: typing_extensions.Annotated[int, FieldMetadata(alias="totalResults")] - start_index: typing_extensions.Annotated[int, FieldMetadata(alias="startIndex")] + total_results: typing_extensions.Annotated[ + int, FieldMetadata(alias="totalResults"), pydantic.Field(alias="totalResults") + ] + start_index: typing_extensions.Annotated[ + int, FieldMetadata(alias="startIndex"), pydantic.Field(alias="startIndex") + ] items_per_page: typing_extensions.Annotated[ - int, FieldMetadata(alias="itemsPerPage") + int, FieldMetadata(alias="itemsPerPage"), pydantic.Field(alias="itemsPerPage") ] resources: typing_extensions.Annotated[ - typing.List[ScimUser], FieldMetadata(alias="Resources") + typing.List[ScimUser], + FieldMetadata(alias="Resources"), + pydantic.Field(alias="Resources"), ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/scim/types/service_provider_config.py b/langfuse/api/scim/types/service_provider_config.py index 48add080e..e2bcab76d 100644 --- a/langfuse/api/scim/types/service_provider_config.py +++ b/langfuse/api/scim/types/service_provider_config.py @@ -16,18 +16,24 @@ class ServiceProviderConfig(UniversalBaseModel): schemas: typing.List[str] documentation_uri: typing_extensions.Annotated[ - str, FieldMetadata(alias="documentationUri") + str, + FieldMetadata(alias="documentationUri"), + pydantic.Field(alias="documentationUri"), ] patch: ScimFeatureSupport bulk: BulkConfig filter: FilterConfig change_password: typing_extensions.Annotated[ - ScimFeatureSupport, FieldMetadata(alias="changePassword") + ScimFeatureSupport, + 
FieldMetadata(alias="changePassword"), + pydantic.Field(alias="changePassword"), ] sort: ScimFeatureSupport etag: ScimFeatureSupport authentication_schemes: typing_extensions.Annotated[ - typing.List[AuthenticationScheme], FieldMetadata(alias="authenticationSchemes") + typing.List[AuthenticationScheme], + FieldMetadata(alias="authenticationSchemes"), + pydantic.Field(alias="authenticationSchemes"), ] meta: ResourceMeta diff --git a/langfuse/api/scim/types/user_meta.py b/langfuse/api/scim/types/user_meta.py index 033ed4fa1..e64be9143 100644 --- a/langfuse/api/scim/types/user_meta.py +++ b/langfuse/api/scim/types/user_meta.py @@ -9,11 +9,15 @@ class UserMeta(UniversalBaseModel): - resource_type: typing_extensions.Annotated[str, FieldMetadata(alias="resourceType")] + resource_type: typing_extensions.Annotated[ + str, FieldMetadata(alias="resourceType"), pydantic.Field(alias="resourceType") + ] created: typing.Optional[str] = None last_modified: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="lastModified") - ] = None + typing.Optional[str], + FieldMetadata(alias="lastModified"), + pydantic.Field(alias="lastModified", default=None), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/score_configs/raw_client.py b/langfuse/api/score_configs/raw_client.py index 8021940c6..d96797637 100644 --- a/langfuse/api/score_configs/raw_client.py +++ b/langfuse/api/score_configs/raw_client.py @@ -15,10 +15,12 @@ from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from ..core.serialization import convert_and_respect_annotation_metadata from .types.score_configs import ScoreConfigs +from pydantic import 
ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) @@ -157,6 +159,13 @@ def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -269,6 +278,13 @@ def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -370,6 +386,13 @@ def get_by_id( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -511,6 +534,13 @@ def update( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -651,6 +681,13 @@ async def create( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -763,6 +800,13 @@ async def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + 
headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -864,6 +908,13 @@ async def get_by_id( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1005,6 +1056,13 @@ async def update( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/score_configs/types/create_score_config_request.py b/langfuse/api/score_configs/types/create_score_config_request.py index 1c23fd91e..e584b1725 100644 --- a/langfuse/api/score_configs/types/create_score_config_request.py +++ b/langfuse/api/score_configs/types/create_score_config_request.py @@ -13,7 +13,9 @@ class CreateScoreConfigRequest(UniversalBaseModel): name: str data_type: typing_extensions.Annotated[ - ScoreConfigDataType, FieldMetadata(alias="dataType") + ScoreConfigDataType, + FieldMetadata(alias="dataType"), + pydantic.Field(alias="dataType"), ] categories: typing.Optional[typing.List[ConfigCategory]] = pydantic.Field( default=None @@ -23,19 +25,23 @@ class CreateScoreConfigRequest(UniversalBaseModel): """ min_value: typing_extensions.Annotated[ - typing.Optional[float], FieldMetadata(alias="minValue") - ] = pydantic.Field(default=None) - """ - Configure a minimum value for numerical scores. 
If not set, the minimum value defaults to -∞ - """ - + typing.Optional[float], + FieldMetadata(alias="minValue"), + pydantic.Field( + alias="minValue", + default=None, + description="Configure a minimum value for numerical scores. If not set, the minimum value defaults to -∞", + ), + ] max_value: typing_extensions.Annotated[ - typing.Optional[float], FieldMetadata(alias="maxValue") - ] = pydantic.Field(default=None) - """ - Configure a maximum value for numerical scores. If not set, the maximum value defaults to +∞ - """ - + typing.Optional[float], + FieldMetadata(alias="maxValue"), + pydantic.Field( + alias="maxValue", + default=None, + description="Configure a maximum value for numerical scores. If not set, the maximum value defaults to +∞", + ), + ] description: typing.Optional[str] = pydantic.Field(default=None) """ Description is shown across the Langfuse UI and can be used to e.g. explain the config categories in detail, why a numeric range was set, or provide additional context on config name or usage diff --git a/langfuse/api/score_configs/types/update_score_config_request.py b/langfuse/api/score_configs/types/update_score_config_request.py index 5237c544f..67ed01f6a 100644 --- a/langfuse/api/score_configs/types/update_score_config_request.py +++ b/langfuse/api/score_configs/types/update_score_config_request.py @@ -11,12 +11,14 @@ class UpdateScoreConfigRequest(UniversalBaseModel): is_archived: typing_extensions.Annotated[ - typing.Optional[bool], FieldMetadata(alias="isArchived") - ] = pydantic.Field(default=None) - """ - The status of the score config showing if it is archived or not - """ - + typing.Optional[bool], + FieldMetadata(alias="isArchived"), + pydantic.Field( + alias="isArchived", + default=None, + description="The status of the score config showing if it is archived or not", + ), + ] name: typing.Optional[str] = pydantic.Field(default=None) """ The name of the score config @@ -30,19 +32,23 @@ class UpdateScoreConfigRequest(UniversalBaseModel): 
""" min_value: typing_extensions.Annotated[ - typing.Optional[float], FieldMetadata(alias="minValue") - ] = pydantic.Field(default=None) - """ - Configure a minimum value for numerical scores. If not set, the minimum value defaults to -∞ - """ - + typing.Optional[float], + FieldMetadata(alias="minValue"), + pydantic.Field( + alias="minValue", + default=None, + description="Configure a minimum value for numerical scores. If not set, the minimum value defaults to -∞", + ), + ] max_value: typing_extensions.Annotated[ - typing.Optional[float], FieldMetadata(alias="maxValue") - ] = pydantic.Field(default=None) - """ - Configure a maximum value for numerical scores. If not set, the maximum value defaults to +∞ - """ - + typing.Optional[float], + FieldMetadata(alias="maxValue"), + pydantic.Field( + alias="maxValue", + default=None, + description="Configure a maximum value for numerical scores. If not set, the maximum value defaults to +∞", + ), + ] description: typing.Optional[str] = pydantic.Field(default=None) """ Description is shown across the Langfuse UI and can be used to e.g. 
explain the config categories in detail, why a numeric range was set, or provide additional context on config name or usage diff --git a/langfuse/api/scores/raw_client.py b/langfuse/api/scores/raw_client.py index 2dc16e688..8d304746a 100644 --- a/langfuse/api/scores/raw_client.py +++ b/langfuse/api/scores/raw_client.py @@ -17,9 +17,11 @@ from ..core.datetime_utils import serialize_datetime from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.get_scores_response import GetScoresResponse +from pydantic import ValidationError class RawScoresClient: @@ -231,6 +233,13 @@ def get_many( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -332,6 +341,13 @@ def get_by_id( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -548,6 +564,13 @@ async def get_many( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -649,6 +672,13 @@ async def get_by_id( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + 
headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/scores/types/get_scores_response_data.py b/langfuse/api/scores/types/get_scores_response_data.py index d1cdda417..c0864bad2 100644 --- a/langfuse/api/scores/types/get_scores_response_data.py +++ b/langfuse/api/scores/types/get_scores_response_data.py @@ -15,43 +15,59 @@ class GetScoresResponseData_Numeric(UniversalBaseModel): data_type: typing_extensions.Annotated[ - typing.Literal["NUMERIC"], FieldMetadata(alias="dataType") - ] = "NUMERIC" + typing.Literal["NUMERIC"], + FieldMetadata(alias="dataType"), + pydantic.Field(alias="dataType", default="NUMERIC"), + ] trace: typing.Optional[GetScoresResponseTraceData] = None value: float id: str trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="traceId") - ] = None + typing.Optional[str], + FieldMetadata(alias="traceId"), + pydantic.Field(alias="traceId", default=None), + ] session_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sessionId") - ] = None + typing.Optional[str], + FieldMetadata(alias="sessionId"), + pydantic.Field(alias="sessionId", default=None), + ] observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = None + typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field(alias="observationId", default=None), + ] dataset_run_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="datasetRunId") - ] = None + typing.Optional[str], + FieldMetadata(alias="datasetRunId"), + pydantic.Field(alias="datasetRunId", default=None), + ] name: str source: ScoreSource timestamp: dt.datetime created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: 
typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] author_user_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="authorUserId") - ] = None + typing.Optional[str], + FieldMetadata(alias="authorUserId"), + pydantic.Field(alias="authorUserId", default=None), + ] comment: typing.Optional[str] = None metadata: typing.Any config_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="configId") - ] = None + typing.Optional[str], + FieldMetadata(alias="configId"), + pydantic.Field(alias="configId", default=None), + ] queue_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="queueId") - ] = None + typing.Optional[str], + FieldMetadata(alias="queueId"), + pydantic.Field(alias="queueId", default=None), + ] environment: str model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( @@ -61,44 +77,62 @@ class GetScoresResponseData_Numeric(UniversalBaseModel): class GetScoresResponseData_Categorical(UniversalBaseModel): data_type: typing_extensions.Annotated[ - typing.Literal["CATEGORICAL"], FieldMetadata(alias="dataType") - ] = "CATEGORICAL" + typing.Literal["CATEGORICAL"], + FieldMetadata(alias="dataType"), + pydantic.Field(alias="dataType", default="CATEGORICAL"), + ] trace: typing.Optional[GetScoresResponseTraceData] = None value: float - string_value: typing_extensions.Annotated[str, FieldMetadata(alias="stringValue")] + string_value: typing_extensions.Annotated[ + str, FieldMetadata(alias="stringValue"), pydantic.Field(alias="stringValue") + ] id: str trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="traceId") - ] = None + typing.Optional[str], + FieldMetadata(alias="traceId"), + pydantic.Field(alias="traceId", default=None), + ] session_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sessionId") - ] = None + 
typing.Optional[str], + FieldMetadata(alias="sessionId"), + pydantic.Field(alias="sessionId", default=None), + ] observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = None + typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field(alias="observationId", default=None), + ] dataset_run_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="datasetRunId") - ] = None + typing.Optional[str], + FieldMetadata(alias="datasetRunId"), + pydantic.Field(alias="datasetRunId", default=None), + ] name: str source: ScoreSource timestamp: dt.datetime created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] author_user_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="authorUserId") - ] = None + typing.Optional[str], + FieldMetadata(alias="authorUserId"), + pydantic.Field(alias="authorUserId", default=None), + ] comment: typing.Optional[str] = None metadata: typing.Any config_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="configId") - ] = None + typing.Optional[str], + FieldMetadata(alias="configId"), + pydantic.Field(alias="configId", default=None), + ] queue_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="queueId") - ] = None + typing.Optional[str], + FieldMetadata(alias="queueId"), + pydantic.Field(alias="queueId", default=None), + ] environment: str model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( @@ -108,44 +142,62 @@ class GetScoresResponseData_Categorical(UniversalBaseModel): class GetScoresResponseData_Boolean(UniversalBaseModel): data_type: typing_extensions.Annotated[ - 
typing.Literal["BOOLEAN"], FieldMetadata(alias="dataType") - ] = "BOOLEAN" + typing.Literal["BOOLEAN"], + FieldMetadata(alias="dataType"), + pydantic.Field(alias="dataType", default="BOOLEAN"), + ] trace: typing.Optional[GetScoresResponseTraceData] = None value: float - string_value: typing_extensions.Annotated[str, FieldMetadata(alias="stringValue")] + string_value: typing_extensions.Annotated[ + str, FieldMetadata(alias="stringValue"), pydantic.Field(alias="stringValue") + ] id: str trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="traceId") - ] = None + typing.Optional[str], + FieldMetadata(alias="traceId"), + pydantic.Field(alias="traceId", default=None), + ] session_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sessionId") - ] = None + typing.Optional[str], + FieldMetadata(alias="sessionId"), + pydantic.Field(alias="sessionId", default=None), + ] observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = None + typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field(alias="observationId", default=None), + ] dataset_run_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="datasetRunId") - ] = None + typing.Optional[str], + FieldMetadata(alias="datasetRunId"), + pydantic.Field(alias="datasetRunId", default=None), + ] name: str source: ScoreSource timestamp: dt.datetime created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] author_user_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="authorUserId") - ] = None + typing.Optional[str], + FieldMetadata(alias="authorUserId"), + 
pydantic.Field(alias="authorUserId", default=None), + ] comment: typing.Optional[str] = None metadata: typing.Any config_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="configId") - ] = None + typing.Optional[str], + FieldMetadata(alias="configId"), + pydantic.Field(alias="configId", default=None), + ] queue_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="queueId") - ] = None + typing.Optional[str], + FieldMetadata(alias="queueId"), + pydantic.Field(alias="queueId", default=None), + ] environment: str model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( @@ -155,44 +207,62 @@ class GetScoresResponseData_Boolean(UniversalBaseModel): class GetScoresResponseData_Correction(UniversalBaseModel): data_type: typing_extensions.Annotated[ - typing.Literal["CORRECTION"], FieldMetadata(alias="dataType") - ] = "CORRECTION" + typing.Literal["CORRECTION"], + FieldMetadata(alias="dataType"), + pydantic.Field(alias="dataType", default="CORRECTION"), + ] trace: typing.Optional[GetScoresResponseTraceData] = None value: float - string_value: typing_extensions.Annotated[str, FieldMetadata(alias="stringValue")] + string_value: typing_extensions.Annotated[ + str, FieldMetadata(alias="stringValue"), pydantic.Field(alias="stringValue") + ] id: str trace_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="traceId") - ] = None + typing.Optional[str], + FieldMetadata(alias="traceId"), + pydantic.Field(alias="traceId", default=None), + ] session_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sessionId") - ] = None + typing.Optional[str], + FieldMetadata(alias="sessionId"), + pydantic.Field(alias="sessionId", default=None), + ] observation_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="observationId") - ] = None + typing.Optional[str], + FieldMetadata(alias="observationId"), + pydantic.Field(alias="observationId", default=None), 
+ ] dataset_run_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="datasetRunId") - ] = None + typing.Optional[str], + FieldMetadata(alias="datasetRunId"), + pydantic.Field(alias="datasetRunId", default=None), + ] name: str source: ScoreSource timestamp: dt.datetime created_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="createdAt") + dt.datetime, FieldMetadata(alias="createdAt"), pydantic.Field(alias="createdAt") ] updated_at: typing_extensions.Annotated[ - dt.datetime, FieldMetadata(alias="updatedAt") + dt.datetime, FieldMetadata(alias="updatedAt"), pydantic.Field(alias="updatedAt") ] author_user_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="authorUserId") - ] = None + typing.Optional[str], + FieldMetadata(alias="authorUserId"), + pydantic.Field(alias="authorUserId", default=None), + ] comment: typing.Optional[str] = None metadata: typing.Any config_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="configId") - ] = None + typing.Optional[str], + FieldMetadata(alias="configId"), + pydantic.Field(alias="configId", default=None), + ] queue_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="queueId") - ] = None + typing.Optional[str], + FieldMetadata(alias="queueId"), + pydantic.Field(alias="queueId", default=None), + ] environment: str model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( diff --git a/langfuse/api/scores/types/get_scores_response_trace_data.py b/langfuse/api/scores/types/get_scores_response_trace_data.py index 306aaaf78..313a1f961 100644 --- a/langfuse/api/scores/types/get_scores_response_trace_data.py +++ b/langfuse/api/scores/types/get_scores_response_trace_data.py @@ -10,12 +10,14 @@ class GetScoresResponseTraceData(UniversalBaseModel): user_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="userId") - ] = pydantic.Field(default=None) - """ - The user ID associated 
with the trace referenced by score - """ - + typing.Optional[str], + FieldMetadata(alias="userId"), + pydantic.Field( + alias="userId", + default=None, + description="The user ID associated with the trace referenced by score", + ), + ] tags: typing.Optional[typing.List[str]] = pydantic.Field(default=None) """ A list of tags associated with the trace referenced by score @@ -27,11 +29,14 @@ class GetScoresResponseTraceData(UniversalBaseModel): """ session_id: typing_extensions.Annotated[ - typing.Optional[str], FieldMetadata(alias="sessionId") - ] = pydantic.Field(default=None) - """ - The session ID associated with the trace referenced by score - """ + typing.Optional[str], + FieldMetadata(alias="sessionId"), + pydantic.Field( + alias="sessionId", + default=None, + description="The session ID associated with the trace referenced by score", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( extra="allow", frozen=True diff --git a/langfuse/api/sessions/raw_client.py b/langfuse/api/sessions/raw_client.py index 9c39a9a5a..56d3fe5ca 100644 --- a/langfuse/api/sessions/raw_client.py +++ b/langfuse/api/sessions/raw_client.py @@ -15,9 +15,11 @@ from ..core.datetime_utils import serialize_datetime from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.paginated_sessions import PaginatedSessions +from pydantic import ValidationError class RawSessionsClient: @@ -149,6 +151,13 @@ def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -253,6 +262,13 @@ def get( 
headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -389,6 +405,13 @@ async def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -493,6 +516,13 @@ async def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/trace/raw_client.py b/langfuse/api/trace/raw_client.py index 10f3d15ec..7baf204d2 100644 --- a/langfuse/api/trace/raw_client.py +++ b/langfuse/api/trace/raw_client.py @@ -15,10 +15,12 @@ from ..core.datetime_utils import serialize_datetime from ..core.http_response import AsyncHttpResponse, HttpResponse from ..core.jsonable_encoder import jsonable_encoder +from ..core.parse_error import ParsingError from ..core.pydantic_utilities import parse_obj_as from ..core.request_options import RequestOptions from .types.delete_trace_response import DeleteTraceResponse from .types.traces import Traces +from pydantic import ValidationError # this is used as the default value for optional parameters OMIT = typing.cast(typing.Any, ...) 
@@ -123,6 +125,13 @@ def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -224,6 +233,13 @@ def delete( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -501,6 +517,13 @@ def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -609,6 +632,13 @@ def delete_multiple( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -715,6 +745,13 @@ async def get( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -816,6 +853,13 @@ async def delete( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( 
status_code=_response.status_code, headers=dict(_response.headers), @@ -1093,6 +1137,13 @@ async def list( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), @@ -1201,6 +1252,13 @@ async def delete_multiple( headers=dict(_response.headers), body=_response.text, ) + except ValidationError as e: + raise ParsingError( + status_code=_response.status_code, + headers=dict(_response.headers), + body=_response.json(), + cause=e, + ) raise ApiError( status_code=_response.status_code, headers=dict(_response.headers), diff --git a/langfuse/api/utils/pagination/types/meta_response.py b/langfuse/api/utils/pagination/types/meta_response.py index 54d3847be..a6fef1173 100644 --- a/langfuse/api/utils/pagination/types/meta_response.py +++ b/langfuse/api/utils/pagination/types/meta_response.py @@ -19,19 +19,22 @@ class MetaResponse(UniversalBaseModel): number of items per page """ - total_items: typing_extensions.Annotated[int, FieldMetadata(alias="totalItems")] = ( - pydantic.Field() - ) - """ - number of total items given the current filters/selection (if any) - """ - - total_pages: typing_extensions.Annotated[int, FieldMetadata(alias="totalPages")] = ( - pydantic.Field() - ) - """ - number of total pages given the current limit - """ + total_items: typing_extensions.Annotated[ + int, + FieldMetadata(alias="totalItems"), + pydantic.Field( + alias="totalItems", + description="number of total items given the current filters/selection (if any)", + ), + ] + total_pages: typing_extensions.Annotated[ + int, + FieldMetadata(alias="totalPages"), + pydantic.Field( + alias="totalPages", + description="number of total pages given the current limit", + ), + ] model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict( 
extra="allow", frozen=True