diff --git a/qdrant_client/async_client_base.py b/qdrant_client/async_client_base.py index abbbf8c4..b5646794 100644 --- a/qdrant_client/async_client_base.py +++ b/qdrant_client/async_client_base.py @@ -11,62 +11,12 @@ from typing import Any, Iterable, Mapping, Optional, Sequence, Union from qdrant_client.conversions import common_types as types -from qdrant_client.http import models class AsyncQdrantBase: def __init__(self, **kwargs: Any): pass - async def search_batch( - self, collection_name: str, requests: Sequence[types.SearchRequest], **kwargs: Any - ) -> list[list[types.ScoredPoint]]: - raise NotImplementedError() - - async def search( - self, - collection_name: str, - query_vector: Union[ - types.NumpyArray, - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - ], - query_filter: Optional[models.Filter] = None, - search_params: Optional[models.SearchParams] = None, - limit: int = 10, - offset: Optional[int] = None, - with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - raise NotImplementedError() - - async def search_groups( - self, - collection_name: str, - query_vector: Union[ - types.NumpyArray, - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - ], - group_by: str, - query_filter: Optional[models.Filter] = None, - search_params: Optional[models.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - **kwargs: Any, - ) -> types.GroupsResult: - raise NotImplementedError() - async def search_matrix_offsets( self, collection_name: str, @@ -154,74 +104,6 @@ async def query_points_groups( ) -> types.GroupsResult: raise NotImplementedError() - async def recommend_batch( - self, collection_name: str, requests: Sequence[types.RecommendRequest], **kwargs: Any - ) -> list[list[types.ScoredPoint]]: - raise NotImplementedError() - - async def recommend( - self, - collection_name: str, - positive: Optional[Sequence[types.RecommendExample]] = None, - negative: Optional[Sequence[types.RecommendExample]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - score_threshold: Optional[float] = None, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - strategy: Optional[types.RecommendStrategy] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - raise NotImplementedError() - - async def recommend_groups( - self, - collection_name: str, - group_by: str, - positive: Optional[Sequence[types.RecommendExample]] = None, - negative: Optional[Sequence[types.RecommendExample]] = None, - query_filter: Optional[models.Filter] = None, - search_params: Optional[models.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - score_threshold: Optional[float] = None, - with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[models.LookupLocation] = 
None, - with_lookup: Optional[types.WithLookupInterface] = None, - strategy: Optional[types.RecommendStrategy] = None, - **kwargs: Any, - ) -> types.GroupsResult: - raise NotImplementedError() - - async def discover( - self, - collection_name: str, - target: Optional[types.TargetVector] = None, - context: Optional[Sequence[types.ContextExamplePair]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - consistency: Optional[types.ReadConsistency] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - raise NotImplementedError() - - async def discover_batch( - self, collection_name: str, requests: Sequence[types.DiscoverRequest], **kwargs: Any - ) -> list[list[types.ScoredPoint]]: - raise NotImplementedError() - async def scroll( self, collection_name: str, @@ -374,11 +256,6 @@ async def recreate_collection( ) -> bool: raise NotImplementedError() - def upload_records( - self, collection_name: str, records: Iterable[types.Record], **kwargs: Any - ) -> None: - raise NotImplementedError() - def upload_points( self, collection_name: str, points: Iterable[types.PointStruct], **kwargs: Any ) -> None: @@ -460,15 +337,6 @@ async def recover_shard_snapshot( ) -> Optional[bool]: raise NotImplementedError() - async def lock_storage(self, reason: str, **kwargs: Any) -> types.LocksOption: - raise NotImplementedError() - - async def unlock_storage(self, **kwargs: Any) -> types.LocksOption: - raise NotImplementedError() - - async def get_locks(self, **kwargs: Any) -> types.LocksOption: - raise NotImplementedError() - async def close(self, **kwargs: Any) -> None: pass diff --git a/qdrant_client/async_qdrant_client.py b/qdrant_client/async_qdrant_client.py index 802e4468..7eeedfd2 100644 --- a/qdrant_client/async_qdrant_client.py +++ b/qdrant_client/async_qdrant_client.py @@ -188,22 +188,6 @@ def grpc_points(self) -> grpc.PointsStub: return self._client.grpc_points raise NotImplementedError(f"gRPC client is not supported for {type(self._client)}") - @property - def rest(self) -> AsyncApis[AsyncApiClient]: - """REST Client - - Returns: - An instance of raw REST API client, generated from OpenAPI schema - """ - warnings.warn( - "The 'rest' property is deprecated and will be removed in a future version. Use `http` instead.", - DeprecationWarning, - stacklevel=2, - ) - if isinstance(self._client, AsyncQdrantRemote): - return self._client.rest - raise NotImplementedError(f"REST client is not supported for {type(self._client)}") - @property def http(self) -> AsyncApis[AsyncApiClient]: """REST Client @@ -224,161 +208,6 @@ def init_options(self) -> dict[str, Any]: """ return self._init_options - async def search_batch( - self, - collection_name: str, - requests: Sequence[types.SearchRequest], - timeout: Optional[int] = None, - consistency: Optional[types.ReadConsistency] = None, - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - """Perform multiple searches in a collection mitigating network overhead - - Args: - collection_name: Name of the collection - requests: List of search requests - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. 
Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - timeout: - Overrides global timeout for this search. Unit is seconds. - - Returns: - List of search responses - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`search_batch` method is deprecated and will be removed in the future. Use `query_batch_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return await self._client.search_batch( - collection_name=collection_name, - requests=requests, - consistency=consistency, - timeout=timeout, - **kwargs, - ) - - async def search( - self, - collection_name: str, - query_vector: Union[ - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - types.NumpyArray, - ], - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: Optional[int] = None, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - append_payload: bool = True, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - """Search for closest vectors in collection taking into account filtering conditions - - Args: - collection_name: Collection to search in - query_vector: - Search for vectors closest to this. - Can be either a vector itself, or a named vector, or a named sparse vector, or a tuple of vector name and vector itself - query_filter: - - Exclude vectors which doesn't fit given conditions. - - If `None` - search among all vectors - search_params: Additional search params - limit: How many results return - offset: - Offset of the first result to return. - May be used to paginate results. - Note: large offset values may cause performance issues. - with_payload: - - Specify which stored payload should be attached to the result. - - If `True` - attach all payload - - If `False` - do not attach any payload - - If List of string - include only specified fields - - If `PayloadSelector` - use explicit rules - with_vectors: - - If `True` - Attach stored vector to the search result. - - If `False` - Do not attach vector. - - If List of string - include only specified fields - - Default: `False` - score_threshold: - Define a minimal score threshold for the result. - If defined, less similar results will not be returned. - Score of the returned result might be higher or smaller than the threshold depending - on the Distance function used. - E.g. for cosine similarity only higher scores will be returned. - append_payload: Same as `with_payload`. Deprecated. - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. 
Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - shard_key_selector: - This parameter allows to specify which shards should be queried. - If `None` - query all shards. Only works for collections with `custom` sharding method. - timeout: - Overrides global timeout for this search. Unit is seconds. - - Examples: - - `Search with filter`:: - - qdrant.search( - collection_name="test_collection", - query_vector=[1.0, 0.1, 0.2, 0.7], - query_filter=Filter( - must=[ - FieldCondition( - key='color', - range=Match( - value="red" - ) - ) - ] - ) - ) - - Returns: - List of found close points with similarity scores. - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`search` method is deprecated and will be removed in the future. Use `query_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return await self._client.search( - collection_name=collection_name, - query_vector=query_vector, - query_filter=query_filter, - search_params=search_params, - limit=limit, - offset=offset, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - append_payload=append_payload, - consistency=consistency, - shard_key_selector=shard_key_selector, - timeout=timeout, - **kwargs, - ) - async def query_batch_points( self, collection_name: str, @@ -607,561 +436,46 @@ async def query_points_groups( types.InferenceObject, None, ] = None, - using: Optional[str] = None, - prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - group_size: int = 3, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - lookup_from: Optional[types.LookupLocation] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.GroupsResult: - """Universal endpoint to group on any available operation, such as search, recommendation, discovery, context search. - - Args: - collection_name: Collection to search in - query: - Query for the chosen search type operation. - - If `str` - use string as UUID of the existing point as a search query. - - If `int` - use integer as ID of the existing point as a search query. - - If `list[float]` - use as a dense vector for nearest search. - - If `list[list[float]]` - use as a multi-vector for nearest search. - - If `SparseVector` - use as a sparse vector for nearest search. - - If `Query` - use as a query for specific search type. - - If `NumpyArray` - use as a dense vector for nearest search. - - If `Document` - infer vector from the document text and use it for nearest search (requires `fastembed` package installed). - - If `None` - return first `limit` points from the collection. - prefetch: prefetch queries to make a selection of the data to be used with the main query - query_filter: - - Exclude vectors which doesn't fit given conditions. 
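The deprecation text above names `query_points` as the replacement for `search`. A minimal migration sketch, assuming `client` is an `AsyncQdrantClient` and the collection has a single unnamed dense vector; note that `query_points` wraps its hits in a `QueryResponse`, so results live under `.points` instead of coming back as a bare list::

    # before (deprecated):
    #   hits = await client.search(collection_name="test_collection",
    #                              query_vector=[1.0, 0.1, 0.2, 0.7], limit=10)
    response = await client.query_points(
        collection_name="test_collection",
        query=[1.0, 0.1, 0.2, 0.7],  # a plain dense vector triggers nearest search
        limit=10,
    )
    hits = response.points  # list of ScoredPoint, as `search` used to return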
- - If `None` - search among all vectors - search_params: Additional search params - limit: How many results return - group_size: How many results return for each group - group_by: Name of the payload field to group by. Field must be of type "keyword" or "integer". - Nested fields are specified using dot notation, e.g. "nested_field.subfield". - with_payload: - - Specify which stored payload should be attached to the result. - - If `True` - attach all payload - - If `False` - do not attach any payload - - If List of string - include only specified fields - - If `PayloadSelector` - use explicit rules - with_vectors: - - If `True` - Attach stored vector to the search result. - - If `False` - Do not attach vector. - - If List of string - include only specified fields - - Default: `False` - score_threshold: - Define a minimal score threshold for the result. - If defined, less similar results will not be returned. - Score of the returned result might be higher or smaller than the threshold depending - on the Distance function used. - E.g. for cosine similarity only higher scores will be returned. - using: - Name of the vectors to use for query. - If `None` - use default vectors or provided in named vector structures. - with_lookup: - Look for points in another collection using the group ids. - If specified, each group will contain a record from the specified collection - with the same id as the group id. In addition, the parameter allows to specify - which parts of the record should be returned, like in `with_payload` and `with_vectors` parameters. - lookup_from: - Defines a location (collection and vector field name), used to lookup vectors being referenced in the query as IDs. - If `None` - current collection will be used. - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - shard_key_selector: - This parameter allows to specify which shards should be queried. - If `None` - query all shards. Only works for collections with `custom` sharding method. - timeout: - Overrides global timeout for this search. Unit is seconds. - - Examples: - - `Search for closest points and group results`:: - - qdrant.query_points_groups( - collection_name="test_collection", - query=[1.0, 0.1, 0.2, 0.7], - group_by="color", - group_size=3, - ) - - Returns: - List of groups with not more than `group_size` hits in each group. - Each group also contains an id of the group, which is the value of the payload field. 
- """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - query = self._resolve_query(query) - if not self.cloud_inference: - if self._inference_inspector.inspect(query) or self._inference_inspector.inspect( - prefetch - ): - query = ( - next( - iter( - self._embed_models( - query, is_query=True, batch_size=self.local_inference_batch_size - ) - ) - ) - if query is not None - else None - ) - if isinstance(prefetch, list): - prefetch = list( - self._embed_models( - prefetch, is_query=True, batch_size=self.local_inference_batch_size - ) - ) - elif prefetch is not None: - prefetch = next( - iter( - self._embed_models( - prefetch, is_query=True, batch_size=self.local_inference_batch_size - ) - ) - ) - return await self._client.query_points_groups( - collection_name=collection_name, - query=query, - prefetch=prefetch, - query_filter=query_filter, - search_params=search_params, - group_by=group_by, - limit=limit, - group_size=group_size, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - using=using, - with_lookup=with_lookup, - consistency=consistency, - shard_key_selector=shard_key_selector, - timeout=timeout, - **kwargs, - ) - - async def search_groups( - self, - collection_name: str, - query_vector: Union[ - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - types.NumpyArray, - ], - group_by: str, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.GroupsResult: - """Search for closest vectors grouped by payload field. - - Searches best matches for query vector grouped by the value of payload field. - Useful to obtain most relevant results for each category, deduplicate results, - finding the best representation vector for the same entity. - - Args: - collection_name: Collection to search in - query_vector: - Search for vectors closest to this. - Can be either a vector itself, or a named vector, or a named sparse vector, or a tuple of vector name and vector itself - group_by: Name of the payload field to group by. - Field must be of type "keyword" or "integer". - Nested fields are specified using dot notation, e.g. "nested_field.subfield". - query_filter: - - Exclude vectors which doesn't fit given conditions. - - If `None` - search among all vectors - search_params: Additional search params - limit: How many groups return - group_size: How many results return for each group - with_payload: - - Specify which stored payload should be attached to the result. - - If `True` - attach all payload - - If `False` - do not attach any payload - - If List of string - include only specified fields - - If `PayloadSelector` - use explicit rules - with_vectors: - - If `True` - Attach stored vector to the search result. - - If `False` - Do not attach vector. - - If List of string - include only specified fields - - Default: `False` - score_threshold: Minimal score threshold for the result. - If defined, less similar results will not be returned. 
- Score of the returned result might be higher or smaller than the threshold depending - on the Distance function used. - E.g. for cosine similarity only higher scores will be returned. - with_lookup: - Look for points in another collection using the group ids. - If specified, each group will contain a record from the specified collection - with the same id as the group id. In addition, the parameter allows to specify - which parts of the record should be returned, like in `with_payload` and `with_vectors` parameters. - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. - Values: - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - shard_key_selector: - This parameter allows to specify which shards should be queried. - If `None` - query all shards. Only works for collections with `custom` sharding method. - timeout: - Overrides global timeout for this search. Unit is seconds. - - Returns: - List of groups with not more than `group_size` hits in each group. - Each group also contains an id of the group, which is the value of the payload field. - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`search_groups` method is deprecated and will be removed in the future. Use `query_points_groups` instead.", - DeprecationWarning, - stacklevel=2, - ) - return await self._client.search_groups( - collection_name=collection_name, - query_vector=query_vector, - group_by=group_by, - query_filter=query_filter, - search_params=search_params, - limit=limit, - group_size=group_size, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - with_lookup=with_lookup, - consistency=consistency, - shard_key_selector=shard_key_selector, - timeout=timeout, - **kwargs, - ) - - async def recommend_batch( - self, - collection_name: str, - requests: Sequence[types.RecommendRequest], - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - """Perform multiple recommend requests in batch mode - - Args: - collection_name: Name of the collection - requests: List of recommend requests - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - timeout: - Overrides global timeout for this search. Unit is seconds. - - Returns: - List of recommend responses - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`recommend_batch` method is deprecated and will be removed in the future. 
Use `query_batch_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return await self._client.recommend_batch( - collection_name=collection_name, - requests=requests, - consistency=consistency, - timeout=timeout, - **kwargs, - ) - - async def recommend( - self, - collection_name: str, - positive: Optional[Sequence[types.RecommendExample]] = None, - negative: Optional[Sequence[types.RecommendExample]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - score_threshold: Optional[float] = None, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - strategy: Optional[types.RecommendStrategy] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - """Recommend points: search for similar points based on already stored in Qdrant examples. - - Provide IDs of the stored points, and Qdrant will perform search based on already existing vectors. - This functionality is especially useful for recommendation over existing collection of points. - - Args: - collection_name: Collection to search in - positive: - List of stored point IDs or vectors, which should be used as reference for similarity search. - If there is only one example - this request is equivalent to the regular search with vector of that - point. - If there are more than one example, Qdrant will attempt to search for similar to all of them. - Recommendation for multiple vectors is experimental. - Its behaviour may change depending on selected strategy. - negative: - List of stored point IDs or vectors, which should be dissimilar to the search result. - Negative examples is an experimental functionality. - Its behaviour may change depending on selected strategy. - query_filter: - - Exclude vectors which doesn't fit given conditions. - - If `None` - search among all vectors - search_params: Additional search params - limit: How many results return - offset: - Offset of the first result to return. - May be used to paginate results. - Note: large offset values may cause performance issues. - with_payload: - - Specify which stored payload should be attached to the result. - - If `True` - attach all payload - - If `False` - do not attach any payload - - If List of string - include only specified fields - - If `PayloadSelector` - use explicit rules - with_vectors: - - If `True` - Attach stored vector to the search result. - - If `False` - Do not attach vector. - - If List of string - include only specified fields - - Default: `False` - score_threshold: - Define a minimal score threshold for the result. - If defined, less similar results will not be returned. - Score of the returned result might be higher or smaller than the threshold depending - on the Distance function used. - E.g. for cosine similarity only higher scores will be returned. - using: - Name of the vectors to use for recommendations. - If `None` - use default vectors. - lookup_from: - Defines a location (collection and vector field name), used to lookup vectors for recommendations. - If `None` - current collection will be used. - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. 
Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - shard_key_selector: - This parameter allows to specify which shards should be queried. - If `None` - query all shards. Only works for collections with `custom` sharding method. - strategy: - Strategy to use for recommendation. - Strategy defines how to combine multiple examples into a recommendation query. - Possible values: - - - 'average_vector' - calculates average vector of all examples and uses it for search - - 'best_score' - finds the result which is closer to positive examples and further from negative - timeout: - Overrides global timeout for this search. Unit is seconds. - - Returns: - List of recommended points with similarity scores. - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`recommend` method is deprecated and will be removed in the future. Use `query_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return await self._client.recommend( - collection_name=collection_name, - positive=positive, - negative=negative, - query_filter=query_filter, - search_params=search_params, - limit=limit, - offset=offset, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - using=using, - lookup_from=lookup_from, - consistency=consistency, - shard_key_selector=shard_key_selector, - strategy=strategy, - timeout=timeout, - **kwargs, - ) - - async def search_matrix_pairs( - self, - collection_name: str, - query_filter: Optional[types.Filter] = None, - limit: int = 3, - sample: int = 10, - using: Optional[str] = None, - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - **kwargs: Any, - ) -> types.SearchMatrixPairsResponse: - """ - Compute distance matrix for sampled points with a pair-based output format. - - Args: - collection_name: Name of the collection. - query_filter: Filter to apply. - limit: How many neighbors per sample to find. - sample: How many points to select and search within. - using: Name of the vectors to use for search. If `None`, use default vectors. - consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - - int: Number of replicas to query, values should be present in all queried replicas. - - 'majority': Query all replicas, but return values present in the majority of replicas. - - 'quorum': Query the majority of replicas, return values present in all of them. - - 'all': Query all replicas, and return values present in all replicas. - timeout: Overrides global timeout for this search. Unit is seconds. - shard_key_selector: This parameter allows specifying which shards should be queried. - If `None`, query all shards. Only works for collections with the `custom` sharding method. - - Returns: - Distance matrix using a pair-based encoding. 
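`recommend` and `recommend_batch` point at `query_points` / `query_batch_points` as their replacements. A hedged sketch of the equivalent call, assuming the `models.RecommendQuery` and `models.RecommendInput` wrappers from current qdrant-client releases (neither type appears in this diff)::

    from qdrant_client import models

    response = await client.query_points(
        collection_name="test_collection",
        query=models.RecommendQuery(
            recommend=models.RecommendInput(
                positive=[100, 231],  # IDs (or vectors) results should be similar to
                negative=[718],       # IDs (or vectors) results should differ from
                strategy=models.RecommendStrategy.AVERAGE_VECTOR,
            )
        ),
        limit=10,
    )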
- """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - return await self._client.search_matrix_pairs( - collection_name=collection_name, - query_filter=query_filter, - limit=limit, - sample=sample, - using=using, - consistency=consistency, - timeout=timeout, - shard_key_selector=shard_key_selector, - **kwargs, - ) - - async def search_matrix_offsets( - self, - collection_name: str, - query_filter: Optional[types.Filter] = None, - limit: int = 3, - sample: int = 10, - using: Optional[str] = None, - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - **kwargs: Any, - ) -> types.SearchMatrixOffsetsResponse: - """ - Compute distance matrix for sampled points with an offset-based output format. - - Args: - collection_name: Name of the collection. - query_filter: Filter to apply. - limit: How many neighbors per sample to find. - sample: How many points to select and search within. - using: Name of the vectors to use for search. If `None`, use default vectors. - consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - - int: Number of replicas to query, values should present in all queried replicas. - - 'majority': Query all replicas, but return values present in the majority of replicas. - - 'quorum': Query the majority of replicas, return values present in all of them. - - 'all': Query all replicas and return values present in all replicas. - timeout: Overrides global timeout for this search. Unit is seconds. - shard_key_selector: This parameter allows specifying which shards should be queried. - If `None`, query all shards. Only works for collections with the `custom` sharding method. - - Returns: - Distance matrix using an offset-based encoding. - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - return await self._client.search_matrix_offsets( - collection_name=collection_name, - query_filter=query_filter, - limit=limit, - sample=sample, - using=using, - consistency=consistency, - timeout=timeout, - shard_key_selector=shard_key_selector, - **kwargs, - ) - - async def recommend_groups( - self, - collection_name: str, - group_by: str, - positive: Optional[Sequence[types.RecommendExample]] = None, - negative: Optional[Sequence[types.RecommendExample]] = None, + using: Optional[str] = None, + prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None, query_filter: Optional[types.Filter] = None, search_params: Optional[types.SearchParams] = None, limit: int = 10, - group_size: int = 1, - score_threshold: Optional[float] = None, + group_size: int = 3, with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, with_vectors: Union[bool, Sequence[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, + score_threshold: Optional[float] = None, with_lookup: Optional[types.WithLookupInterface] = None, - strategy: Optional[types.RecommendStrategy] = None, + lookup_from: Optional[types.LookupLocation] = None, consistency: Optional[types.ReadConsistency] = None, shard_key_selector: Optional[types.ShardKeySelector] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> types.GroupsResult: - """Recommend point groups: search for similar points based on already stored in Qdrant examples - and groups by payload field. - - Recommend best matches for given stored examples grouped by the value of payload field. 
- Useful to obtain most relevant results for each category, deduplicate results, - finding the best representation vector for the same entity. + """Universal endpoint to group on any available operation, such as search, recommendation, discovery, context search. Args: collection_name: Collection to search in - positive: - List of stored point IDs or vectors, which should be used as reference for similarity search. - If there is only one example - this request is equivalent to the regular search with vector of that - point. - If there are more than one example, Qdrant will attempt to search for similar to all of them. - Recommendation for multiple vectors is experimental. - Its behaviour may change depending on selected strategy. - negative: - List of stored point IDs or vectors, which should be dissimilar to the search result. - Negative examples is an experimental functionality. - Its behaviour may change depending on selected strategy. - group_by: Name of the payload field to group by. - Field must be of type "keyword" or "integer". - Nested fields are specified using dot notation, e.g. "nested_field.subfield". + query: + Query for the chosen search type operation. + - If `str` - use string as UUID of the existing point as a search query. + - If `int` - use integer as ID of the existing point as a search query. + - If `list[float]` - use as a dense vector for nearest search. + - If `list[list[float]]` - use as a multi-vector for nearest search. + - If `SparseVector` - use as a sparse vector for nearest search. + - If `Query` - use as a query for specific search type. + - If `NumpyArray` - use as a dense vector for nearest search. + - If `Document` - infer vector from the document text and use it for nearest search (requires `fastembed` package installed). + - If `None` - return first `limit` points from the collection. + prefetch: prefetch queries to make a selection of the data to be used with the main query query_filter: - Exclude vectors which doesn't fit given conditions. - If `None` - search among all vectors search_params: Additional search params - limit: How many groups return + limit: How many results return group_size: How many results return for each group + group_by: Name of the payload field to group by. Field must be of type "keyword" or "integer". + Nested fields are specified using dot notation, e.g. "nested_field.subfield". with_payload: - Specify which stored payload should be attached to the result. - If `True` - attach all payload @@ -1180,16 +494,16 @@ async def recommend_groups( on the Distance function used. E.g. for cosine similarity only higher scores will be returned. using: - Name of the vectors to use for recommendations. - If `None` - use default vectors. - lookup_from: - Defines a location (collection and vector field name), used to lookup vectors for recommendations. - If `None` - current collection will be used. + Name of the vectors to use for query. + If `None` - use default vectors or provided in named vector structures. with_lookup: Look for points in another collection using the group ids. If specified, each group will contain a record from the specified collection with the same id as the group id. In addition, the parameter allows to specify which parts of the record should be returned, like in `with_payload` and `with_vectors` parameters. + lookup_from: + Defines a location (collection and vector field name), used to lookup vectors being referenced in the query as IDs. + If `None` - current collection will be used. 
consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: @@ -1200,170 +514,164 @@ async def recommend_groups( shard_key_selector: This parameter allows to specify which shards should be queried. If `None` - query all shards. Only works for collections with `custom` sharding method. - strategy: - Strategy to use for recommendation. - Strategy defines how to combine multiple examples into a recommendation query. - Possible values: - - - 'average_vector' - calculates average vector of all examples and uses it for search - - 'best_score' - finds the result which is closer to positive examples and further from negative timeout: Overrides global timeout for this search. Unit is seconds. - Returns: + Examples: + + `Search for closest points and group results`:: + + qdrant.query_points_groups( + collection_name="test_collection", + query=[1.0, 0.1, 0.2, 0.7], + group_by="color", + group_size=3, + ) + + Returns: List of groups with not more than `group_size` hits in each group. Each group also contains an id of the group, which is the value of the payload field. - """ assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`recommend_groups` method is deprecated and will be removed in the future. Use `query_points_groups` instead.", - DeprecationWarning, - stacklevel=2, - ) - return await self._client.recommend_groups( + query = self._resolve_query(query) + if not self.cloud_inference: + if self._inference_inspector.inspect(query) or self._inference_inspector.inspect( + prefetch + ): + query = ( + next( + iter( + self._embed_models( + query, is_query=True, batch_size=self.local_inference_batch_size + ) + ) + ) + if query is not None + else None + ) + if isinstance(prefetch, list): + prefetch = list( + self._embed_models( + prefetch, is_query=True, batch_size=self.local_inference_batch_size + ) + ) + elif prefetch is not None: + prefetch = next( + iter( + self._embed_models( + prefetch, is_query=True, batch_size=self.local_inference_batch_size + ) + ) + ) + return await self._client.query_points_groups( collection_name=collection_name, - group_by=group_by, - positive=positive, - negative=negative, + query=query, + prefetch=prefetch, query_filter=query_filter, search_params=search_params, + group_by=group_by, limit=limit, group_size=group_size, - score_threshold=score_threshold, with_payload=with_payload, with_vectors=with_vectors, + score_threshold=score_threshold, using=using, - lookup_from=lookup_from, with_lookup=with_lookup, - strategy=strategy, consistency=consistency, shard_key_selector=shard_key_selector, timeout=timeout, **kwargs, ) - async def discover( + async def search_matrix_pairs( self, collection_name: str, - target: Optional[types.TargetVector] = None, - context: Optional[Sequence[types.ContextExamplePair]] = None, query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, + limit: int = 3, + sample: int = 10, using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, timeout: Optional[int] = None, + shard_key_selector: Optional[types.ShardKeySelector] = None, **kwargs: Any, - ) -> list[types.ScoredPoint]: + ) -> types.SearchMatrixPairsResponse: """ - 
Use context and a target to find the most similar points, constrained by the context. + Compute distance matrix for sampled points with a pair-based output format. Args: - collection_name: Collection to discover in - - target: - Look for vectors closest to this. - - When using the target (with or without context), the integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. - - context: - Pairs of { positive, negative } examples to constrain the search. - - When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair. - - Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. - - For discovery search (when including a target), the context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise. - - query_filter: - Look only for points which satisfies this conditions - - search_params: - Additional search params - - limit: - Max number of result to return - - offset: - Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. - - with_payload: - Select which payload to return with the response. Default: None - - with_vectors: - Whether to return the point vector with the result? - - using: - Define which vector to use for recommendation, if not specified - try to use default vector. - - lookup_from: - The location used to lookup vectors. If not specified - use current collection. Note: the other collection should have the same vector size as the current collection. - - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - - shard_key_selector: - This parameter allows to specify which shards should be queried. - If `None` - query all shards. Only works for collections with `custom` sharding method. - - timeout: - Overrides global timeout for this search. Unit is seconds. + collection_name: Name of the collection. + query_filter: Filter to apply. + limit: How many neighbors per sample to find. + sample: How many points to select and search within. + using: Name of the vectors to use for search. If `None`, use default vectors. + consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: + - int: Number of replicas to query, values should be present in all queried replicas. + - 'majority': Query all replicas, but return values present in the majority of replicas. + - 'quorum': Query the majority of replicas, return values present in all of them. + - 'all': Query all replicas, and return values present in all replicas. + timeout: Overrides global timeout for this search. Unit is seconds. 
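The removed `discover` and `discover_batch` follow the same consolidation into `query_points`. A sketch under the assumption that `models.DiscoverQuery`, `models.DiscoverInput`, and `models.ContextPair` are available from the models module (none of them is introduced by this diff)::

    response = await client.query_points(
        collection_name="test_collection",
        query=models.DiscoverQuery(
            discover=models.DiscoverInput(
                target=42,  # point ID or vector to steer the search towards
                context=[models.ContextPair(positive=100, negative=718)],
            )
        ),
        limit=10,
    )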
+ shard_key_selector: This parameter allows specifying which shards should be queried. + If `None`, query all shards. Only works for collections with the `custom` sharding method. Returns: - List of discovered points with discovery or context scores, accordingly. + Distance matrix using a pair-based encoding. """ - warnings.warn( - "`discover` method is deprecated and will be removed in the future. Use `query_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return await self._client.discover( + assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" + return await self._client.search_matrix_pairs( collection_name=collection_name, - target=target, - context=context, query_filter=query_filter, - search_params=search_params, limit=limit, - offset=offset, - with_payload=with_payload, - with_vectors=with_vectors, + sample=sample, using=using, - lookup_from=lookup_from, consistency=consistency, - shard_key_selector=shard_key_selector, timeout=timeout, + shard_key_selector=shard_key_selector, **kwargs, ) - async def discover_batch( + async def search_matrix_offsets( self, collection_name: str, - requests: Sequence[types.DiscoverRequest], + query_filter: Optional[types.Filter] = None, + limit: int = 3, + sample: int = 10, + using: Optional[str] = None, consistency: Optional[types.ReadConsistency] = None, timeout: Optional[int] = None, + shard_key_selector: Optional[types.ShardKeySelector] = None, **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - warnings.warn( - "`discover_batch` method is deprecated and will be removed in the future. Use `query_batch_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return await self._client.discover_batch( + ) -> types.SearchMatrixOffsetsResponse: + """ + Compute distance matrix for sampled points with an offset-based output format. + + Args: + collection_name: Name of the collection. + query_filter: Filter to apply. + limit: How many neighbors per sample to find. + sample: How many points to select and search within. + using: Name of the vectors to use for search. If `None`, use default vectors. + consistency: Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: + - int: Number of replicas to query, values should present in all queried replicas. + - 'majority': Query all replicas, but return values present in the majority of replicas. + - 'quorum': Query the majority of replicas, return values present in all of them. + - 'all': Query all replicas and return values present in all replicas. + timeout: Overrides global timeout for this search. Unit is seconds. + shard_key_selector: This parameter allows specifying which shards should be queried. + If `None`, query all shards. Only works for collections with the `custom` sharding method. + + Returns: + Distance matrix using an offset-based encoding. 
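Both matrix endpoints take the same sampling parameters and differ only in how the distance matrix is encoded in the response. A usage sketch::

    # pair-based encoding: explicit (a, b, score) entries
    pairs = await client.search_matrix_pairs(
        collection_name="test_collection",
        sample=10,  # how many points to sample
        limit=3,    # how many neighbors to find per sampled point
    )
    # offset-based encoding: the same distances as flat offset/score arrays
    offsets = await client.search_matrix_offsets(
        collection_name="test_collection",
        sample=10,
        limit=3,
    )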
+ """ + assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" + return await self._client.search_matrix_offsets( collection_name=collection_name, - requests=requests, + query_filter=query_filter, + limit=limit, + sample=sample, + using=using, consistency=consistency, timeout=timeout, + shard_key_selector=shard_key_selector, **kwargs, ) @@ -1533,6 +841,7 @@ async def upsert( wait: bool = True, ordering: Optional[types.WriteOrdering] = None, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> types.UpdateResult: """ @@ -1558,6 +867,8 @@ async def upsert( If multiple shard_keys are provided, the update will be written to each of them. Only works for collections with `custom` sharding method. + update_filter: If specified, only points that match this filter will be updated, others will be inserted + Returns: Operation Result(UpdateResult) """ @@ -1594,6 +905,7 @@ async def upsert( wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, + update_filter=update_filter, **kwargs, ) @@ -1604,6 +916,7 @@ async def update_vectors( wait: bool = True, ordering: Optional[types.WriteOrdering] = None, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> types.UpdateResult: """Update specified vectors in the collection. Keeps payload and unspecified vectors unchanged. @@ -1632,6 +945,9 @@ async def update_vectors( If multiple shard_keys are provided, the update will be written to each of them. Only works for collections with `custom` sharding method. + update_filter: + If specified, only points that match this filter will be updated + Returns: Operation Result(UpdateResult) """ @@ -1648,6 +964,7 @@ async def update_vectors( wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, + update_filter=update_filter, ) async def delete_vectors( @@ -2184,6 +1501,7 @@ async def update_collection( timeout: Optional[int] = None, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, strict_mode_config: Optional[types.StrictModeConfig] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: """Update parameters of the collection @@ -2200,6 +1518,7 @@ async def update_collection( If timeout is reached - request will return with service error. 
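The new `update_filter` parameter, threaded through `upsert`, `update_vectors`, and the upload helpers below, makes writes conditional: per the added docstrings, points matching the filter are updated and the rest are inserted. A minimal sketch, with `color` as an illustrative payload field::

    await client.upsert(
        collection_name="test_collection",
        points=[
            models.PointStruct(id=1, vector=[0.1, 0.2, 0.3, 0.4], payload={"color": "red"}),
        ],
        update_filter=models.Filter(
            must=[models.FieldCondition(key="color", match=models.MatchValue(value="red"))]
        ),
    )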
sparse_vectors_config: Override for sparse vector-specific configuration strict_mode_config: Override for strict mode configuration + metadata: Arbitrary JSON-like metadata for the collection, will be merged with already stored metadata Returns: Operation result """ @@ -2220,6 +1539,7 @@ async def update_collection( timeout=timeout, sparse_vectors_config=sparse_vectors_config, strict_mode_config=strict_mode_config, + metadata=metadata, **kwargs, ) @@ -2258,9 +1578,9 @@ async def create_collection( optimizers_config: Optional[types.OptimizersConfigDiff] = None, wal_config: Optional[types.WalConfigDiff] = None, quantization_config: Optional[types.QuantizationConfig] = None, - init_from: Optional[types.InitFrom] = None, timeout: Optional[int] = None, strict_mode_config: Optional[types.StrictModeConfig] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: """Create empty collection with given parameters @@ -2302,11 +1622,11 @@ async def create_collection( optimizers_config: Params for optimizer wal_config: Params for Write-Ahead-Log quantization_config: Params for quantization, if None - quantization will be disabled - init_from: Use data stored in another collection to initialize this collection timeout: Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error. strict_mode_config: Configure limitations for the collection, such as max size, rate limits, etc. + metadata: Arbitrary JSON-like metadata for the collection Returns: Operation result @@ -2324,10 +1644,10 @@ async def create_collection( optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, - init_from=init_from, timeout=timeout, sparse_vectors_config=sparse_vectors_config, strict_mode_config=strict_mode_config, + metadata=metadata, **kwargs, ) @@ -2345,9 +1665,9 @@ async def recreate_collection( optimizers_config: Optional[types.OptimizersConfigDiff] = None, wal_config: Optional[types.WalConfigDiff] = None, quantization_config: Optional[types.QuantizationConfig] = None, - init_from: Optional[types.InitFrom] = None, timeout: Optional[int] = None, strict_mode_config: Optional[types.StrictModeConfig] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: """Delete and create empty collection with given parameters @@ -2389,11 +1709,11 @@ async def recreate_collection( optimizers_config: Params for optimizer wal_config: Params for Write-Ahead-Log quantization_config: Params for quantization, if None - quantization will be disabled - init_from: Use data stored in another collection to initialize this collection timeout: Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error. strict_mode_config: Configure limitations for the collection, such as max size, rate limits, etc. 
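`create_collection` drops `init_from` and gains the same `metadata` parameter as `update_collection` (where the value is merged with already stored metadata). A sketch with placeholder metadata keys::

    await client.create_collection(
        collection_name="test_collection",
        vectors_config=models.VectorParams(size=4, distance=models.Distance.COSINE),
        metadata={"owner": "search-team", "env": "staging"},  # arbitrary JSON-like data
    )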
+ metadata: Arbitrary JSON metadata for the collection Returns: Operation result @@ -2416,65 +1736,13 @@ async def recreate_collection( optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, - init_from=init_from, timeout=timeout, sparse_vectors_config=sparse_vectors_config, strict_mode_config=strict_mode_config, + metadata=metadata, **kwargs, ) - def upload_records( - self, - collection_name: str, - records: Iterable[types.Record], - batch_size: int = 64, - parallel: int = 1, - method: Optional[str] = None, - max_retries: int = 3, - wait: bool = False, - shard_key_selector: Optional[types.ShardKeySelector] = None, - **kwargs: Any, - ) -> None: - """Upload records to the collection - - Similar to `upload_collection` method, but operates with records, rather than vector and payload individually. - - Args: - collection_name: Name of the collection to upload to - records: Iterator over records to upload - batch_size: How many vectors upload per-request, Default: 64 - parallel: Number of parallel processes of upload - method: Start method for parallel processes, Default: forkserver - max_retries: maximum number of retries in case of a failure - during the upload of a batch - wait: - Await for the results to be applied on the server side. - If `true`, each update request will explicitly wait for the confirmation of completion. Might be slower. - If `false`, each update request will return immediately after the confirmation of receiving. - Default: `false` - shard_key_selector: Defines the shard groups that should be used to write updates into. - If multiple shard_keys are provided, the update will be written to each of them. - Only works for collections with `custom` sharding method. - This parameter overwrites shard keys written in the records. - - """ - warnings.warn( - "`upload_records` is deprecated, use `upload_points` instead", - DeprecationWarning, - stacklevel=2, - ) - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - return self._client.upload_records( - collection_name=collection_name, - records=records, - batch_size=batch_size, - parallel=parallel, - method=method, - max_retries=max_retries, - wait=wait, - shard_key_selector=shard_key_selector, - ) - def upload_points( self, collection_name: str, @@ -2485,6 +1753,7 @@ def upload_points( max_retries: int = 3, wait: bool = False, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> None: """Upload points to the collection @@ -2508,7 +1777,7 @@ def upload_points( If multiple shard_keys are provided, the update will be written to each of them. Only works for collections with `custom` sharding method. This parameter overwrites shard keys written in the records. - + update_filter: If specified, only points that match this filter will be updated, others will be inserted """ def chain(*iterables: Iterable) -> Iterable: @@ -2538,6 +1807,7 @@ def chain(*iterables: Iterable) -> Iterable: max_retries=max_retries, wait=wait, shard_key_selector=shard_key_selector, + update_filter=update_filter, ) def upload_collection( @@ -2554,12 +1824,13 @@ def upload_collection( max_retries: int = 3, wait: bool = False, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> None: """Upload vectors and payload to the collection. This method will perform automatic batching of the data. If you need to perform a single update, use `upsert` method. 
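`upload_records` is removed in favor of `upload_points`, which takes `PointStruct` objects instead of `Record` objects but keeps the same batching knobs. A minimal sketch (note the helper is synchronous and batches internally)::

    client.upload_points(
        collection_name="test_collection",
        points=(
            models.PointStruct(id=i, vector=[0.1, 0.2, 0.3, 0.4], payload={"i": i})
            for i in range(100)
        ),
        batch_size=64,  # vectors per request
        parallel=1,     # parallel upload processes
    )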
- Note: use `upload_records` method if you want to upload multiple vectors with single payload. + Note: use `upload_points` method if you want to upload multiple vectors with single payload. Args: collection_name: Name of the collection to upload to @@ -2579,6 +1850,7 @@ def upload_collection( shard_key_selector: Defines the shard groups that should be used to write updates into. If multiple shard_keys are provided, the update will be written to each of them. Only works for collections with `custom` sharding method. + update_filter: If specified, only points that match this filter will be updated, others will be inserted """ def chain(*iterables: Iterable) -> Iterable: @@ -2611,6 +1883,7 @@ def chain(*iterables: Iterable) -> Iterable: max_retries=max_retries, wait=wait, shard_key_selector=shard_key_selector, + update_filter=update_filter, ) async def create_payload_index( @@ -2955,21 +2228,6 @@ async def recover_shard_snapshot( **kwargs, ) - async def lock_storage(self, reason: str, **kwargs: Any) -> types.LocksOption: - """Lock storage for writing.""" - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - return await self._client.lock_storage(reason=reason, **kwargs) - - async def unlock_storage(self, **kwargs: Any) -> types.LocksOption: - """Unlock storage for writing.""" - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - return await self._client.unlock_storage(**kwargs) - - async def get_locks(self, **kwargs: Any) -> types.LocksOption: - """Get current locks state.""" - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - return await self._client.get_locks(**kwargs) - async def create_shard_key( self, collection_name: str, diff --git a/qdrant_client/async_qdrant_fastembed.py b/qdrant_client/async_qdrant_fastembed.py index dcf33dd7..7cb3123d 100644 --- a/qdrant_client/async_qdrant_fastembed.py +++ b/qdrant_client/async_qdrant_fastembed.py @@ -16,7 +16,7 @@ import numpy as np from pydantic import BaseModel from qdrant_client import grpc -from qdrant_client.common.client_warnings import show_warning +from qdrant_client.common.client_warnings import show_warning, show_warning_once from qdrant_client.async_client_base import AsyncQdrantBase from qdrant_client.embed.embedder import Embedder from qdrant_client.embed.model_embedder import ModelEmbedder @@ -534,6 +534,9 @@ async def add( List of IDs of added documents. If no ids provided, UUIDs will be randomly generated on client side. """ + show_warning_once( + "`add` method has been deprecated and will be removed in 1.17. Instead, inference can be done internally within regular methods like `upsert` by wrapping data into `models.Document` or `models.Image`." + ) encoded_docs = self._embed_documents( documents=documents, embedding_model_name=self.embedding_model_name, @@ -599,12 +602,15 @@ async def query( - Exclude vectors which doesn't fit given conditions. - If `None` - search among all vectors limit: How many results return - **kwargs: Additional search parameters. See `qdrant_client.models.SearchRequest` for details. + **kwargs: Additional search parameters. See `qdrant_client.models.QueryRequest` for details. Returns: list[types.ScoredPoint]: List of scored points. """ + show_warning_once( + "`query` method has been deprecated and will be removed in 1.17. Instead, inference can be done internally within regular methods like `query_points` by wrapping data into `models.Document` or `models.Image`." 
+ ) embedding_model_inst = self._get_or_init_model( model_name=self.embedding_model_name, deprecated=True ) @@ -612,16 +618,17 @@ async def query( query_vector = embeddings[0].tolist() if self.sparse_embedding_model_name is None: return self._scored_points_to_query_responses( - await self.search( - collection_name=collection_name, - query_vector=models.NamedVector( - name=self.get_vector_field_name(), vector=query_vector - ), - query_filter=query_filter, - limit=limit, - with_payload=True, - **kwargs, - ) + ( + await self.query_points( + collection_name=collection_name, + query=query_vector, + using=self.get_vector_field_name(), + query_filter=query_filter, + limit=limit, + with_payload=True, + **kwargs, + ) + ).points ) sparse_embedding_model_inst = self._get_or_init_sparse_model( model_name=self.sparse_embedding_model_name, deprecated=True @@ -630,27 +637,29 @@ async def query( sparse_query_vector = models.SparseVector( indices=sparse_vector.indices.tolist(), values=sparse_vector.values.tolist() ) - dense_request = models.SearchRequest( - vector=models.NamedVector(name=self.get_vector_field_name(), vector=query_vector), + dense_request = models.QueryRequest( + query=query_vector, + using=self.get_vector_field_name(), filter=query_filter, limit=limit, with_payload=True, **kwargs, ) - sparse_request = models.SearchRequest( - vector=models.NamedSparseVector( - name=self.get_sparse_vector_field_name(), vector=sparse_query_vector - ), + sparse_request = models.QueryRequest( + query=sparse_query_vector, + using=self.get_sparse_vector_field_name(), filter=query_filter, limit=limit, with_payload=True, **kwargs, ) - (dense_request_response, sparse_request_response) = await self.search_batch( + (dense_request_response, sparse_request_response) = await self.query_batch_points( collection_name=collection_name, requests=[dense_request, sparse_request] ) return self._scored_points_to_query_responses( - reciprocal_rank_fusion([dense_request_response, sparse_request_response], limit=limit) + reciprocal_rank_fusion( + [dense_request_response.points, sparse_request_response.points], limit=limit + ) ) async def query_batch( @@ -675,22 +684,24 @@ async def query_batch( - If `None` - search among all vectors This filter will be applied to all search requests. limit: How many results return - **kwargs: Additional search parameters. See `qdrant_client.models.SearchRequest` for details. + **kwargs: Additional search parameters. See `qdrant_client.models.QueryRequest` for details. Returns: list[list[QueryResponse]]: List of lists of responses for each query text. """ + show_warning_once( + "`query_batch` method has been deprecated and will be removed in 1.17. Instead, inference can be done internally within regular methods like `query_batch_points` by wrapping data into `models.Document` or `models.Image`." 
+ ) embedding_model_inst = self._get_or_init_model( model_name=self.embedding_model_name, deprecated=True ) query_vectors = list(embedding_model_inst.query_embed(query=query_texts)) requests = [] for vector in query_vectors: - request = models.SearchRequest( - vector=models.NamedVector( - name=self.get_vector_field_name(), vector=vector.tolist() - ), + request = models.QueryRequest( + query=vector.tolist(), + using=self.get_vector_field_name(), filter=query_filter, limit=limit, with_payload=True, @@ -698,8 +709,12 @@ async def query_batch( ) requests.append(request) if self.sparse_embedding_model_name is None: - responses = await self.search_batch(collection_name=collection_name, requests=requests) - return [self._scored_points_to_query_responses(response) for response in responses] + responses = await self.query_batch_points( + collection_name=collection_name, requests=requests + ) + return [ + self._scored_points_to_query_responses(response.points) for response in responses + ] sparse_embedding_model_inst = self._get_or_init_sparse_model( model_name=self.sparse_embedding_model_name, deprecated=True ) @@ -710,21 +725,22 @@ async def query_batch( for sparse_vector in sparse_embedding_model_inst.embed(documents=query_texts) ] for sparse_vector in sparse_query_vectors: - request = models.SearchRequest( - vector=models.NamedSparseVector( - name=self.get_sparse_vector_field_name(), vector=sparse_vector - ), + request = models.QueryRequest( + using=self.get_sparse_vector_field_name(), + query=sparse_vector, filter=query_filter, limit=limit, with_payload=True, **kwargs, ) requests.append(request) - responses = await self.search_batch(collection_name=collection_name, requests=requests) + responses = await self.query_batch_points( + collection_name=collection_name, requests=requests + ) dense_responses = responses[: len(query_texts)] sparse_responses = responses[len(query_texts) :] responses = [ - reciprocal_rank_fusion([dense_response, sparse_response], limit=limit) + reciprocal_rank_fusion([dense_response.points, sparse_response.points], limit=limit) for (dense_response, sparse_response) in zip(dense_responses, sparse_responses) ] return [self._scored_points_to_query_responses(response) for response in responses] @@ -756,7 +772,7 @@ def _resolve_query( Raises: ValueError: if query is not of supported type """ - if isinstance(query, get_args(types.Query)) or isinstance(query, grpc.Query): + if isinstance(query, get_args(types.Query)): return query if isinstance(query, types.SparseVector): return models.NearestQuery(nearest=query) diff --git a/qdrant_client/async_qdrant_remote.py b/qdrant_client/async_qdrant_remote.py index 87726927..f333eb31 100644 --- a/qdrant_client/async_qdrant_remote.py +++ b/qdrant_client/async_qdrant_remote.py @@ -27,7 +27,6 @@ get_args, ) import httpx -import numpy as np from grpc import Compression from urllib3.util import Url, parse_url from urllib.parse import urljoin @@ -375,160 +374,6 @@ def http(self) -> AsyncApis[AsyncApiClient]: """ return self.openapi_client - async def search_batch( - self, - collection_name: str, - requests: Sequence[types.SearchRequest], - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - if self._prefer_grpc: - requests = [ - RestToGrpc.convert_search_request(r, collection_name) - if isinstance(r, models.SearchRequest) - else r - for r in requests - ] - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = 
RestToGrpc.convert_read_consistency(consistency) - grpc_res: grpc.SearchBatchResponse = await self.grpc_points.SearchBatch( - grpc.SearchBatchPoints( - collection_name=collection_name, - search_points=requests, - read_consistency=consistency, - timeout=timeout, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - return [ - [GrpcToRest.convert_scored_point(hit) for hit in r.result] for r in grpc_res.result - ] - else: - requests = [ - GrpcToRest.convert_search_points(r) if isinstance(r, grpc.SearchPoints) else r - for r in requests - ] - http_res: Optional[list[list[models.ScoredPoint]]] = ( - await self.http.search_api.search_batch_points( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - search_request_batch=models.SearchRequestBatch(searches=requests), - ) - ).result - assert http_res is not None, "Search batch returned None" - return http_res - - async def search( - self, - collection_name: str, - query_vector: Union[ - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - types.NumpyArray, - ], - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: Optional[int] = None, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - append_payload: bool = True, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - if not append_payload: - show_warning_once( - message="Usage of `append_payload` is deprecated. Please consider using `with_payload` instead", - category=DeprecationWarning, - stacklevel=5, - idx="search-append-payload", - ) - with_payload = append_payload - if isinstance(query_vector, np.ndarray): - query_vector = query_vector.tolist() - if self._prefer_grpc: - vector_name = None - sparse_indices = None - if isinstance(query_vector, types.NamedVector): - vector = query_vector.vector - vector_name = query_vector.name - elif isinstance(query_vector, types.NamedSparseVector): - vector_name = query_vector.name - sparse_indices = grpc.SparseIndices(data=query_vector.vector.indices) - vector = query_vector.vector.values - elif isinstance(query_vector, tuple): - vector_name = query_vector[0] - vector = query_vector[1] - else: - vector = list(query_vector) - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - if isinstance(search_params, models.SearchParams): - search_params = RestToGrpc.convert_search_params(search_params) - if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)): - with_payload = RestToGrpc.convert_with_payload_interface(with_payload) - if isinstance(with_vectors, get_args_subscribed(models.WithVector)): - with_vectors = RestToGrpc.convert_with_vectors(with_vectors) - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - res: grpc.SearchResponse = await self.grpc_points.Search( - grpc.SearchPoints( - collection_name=collection_name, - vector=vector, - vector_name=vector_name, - filter=query_filter, - limit=limit, - 
offset=offset, - with_vectors=with_vectors, - with_payload=with_payload, - params=search_params, - score_threshold=score_threshold, - read_consistency=consistency, - timeout=timeout, - sparse_indices=sparse_indices, - shard_key_selector=shard_key_selector, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - return [GrpcToRest.convert_scored_point(hit) for hit in res.result] - else: - if isinstance(query_vector, tuple): - query_vector = types.NamedVector(name=query_vector[0], vector=query_vector[1]) - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - if isinstance(search_params, grpc.SearchParams): - search_params = GrpcToRest.convert_search_params(search_params) - if isinstance(with_payload, grpc.WithPayloadSelector): - with_payload = GrpcToRest.convert_with_payload_selector(with_payload) - search_result = await self.http.search_api.search_points( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - search_request=models.SearchRequest( - vector=query_vector, - filter=query_filter, - limit=limit, - offset=offset, - params=search_params, - with_vector=with_vectors, - with_payload=with_payload, - score_threshold=score_threshold, - shard_key=shard_key_selector, - ), - ) - result: Optional[list[types.ScoredPoint]] = search_result.result - assert result is not None, "Search returned None" - return result - async def query_points( self, collection_name: str, @@ -606,17 +451,6 @@ async def query_points( scored_points = [GrpcToRest.convert_scored_point(hit) for hit in res.result] return models.QueryResponse(points=scored_points) else: - if isinstance(query, grpc.Query): - query = GrpcToRest.convert_query(query) - if isinstance(prefetch, grpc.PrefetchQuery): - prefetch = GrpcToRest.convert_prefetch_query(prefetch) - if isinstance(prefetch, list): - prefetch = [ - GrpcToRest.convert_prefetch_query(p) - if isinstance(p, grpc.PrefetchQuery) - else p - for p in prefetch - ] if isinstance(query_filter, grpc.Filter): query_filter = GrpcToRest.convert_filter(model=query_filter) if isinstance(search_params, grpc.SearchParams): @@ -682,10 +516,6 @@ async def query_batch_points( for r in grpc_res.result ] else: - requests = [ - GrpcToRest.convert_query_points(r) if isinstance(r, grpc.QueryPoints) else r - for r in requests - ] http_res: Optional[list[models.QueryResponse]] = ( await self.http.search_api.query_batch_points( collection_name=collection_name, @@ -723,527 +553,22 @@ async def query_points_groups( with_vectors: Union[bool, Sequence[str]] = False, score_threshold: Optional[float] = None, with_lookup: Optional[types.WithLookupInterface] = None, - lookup_from: Optional[types.LookupLocation] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.GroupsResult: - if self._prefer_grpc: - if query is not None: - query = RestToGrpc.convert_query(query) - if isinstance(prefetch, models.Prefetch): - prefetch = [RestToGrpc.convert_prefetch_query(prefetch)] - if isinstance(prefetch, list): - prefetch = [ - RestToGrpc.convert_prefetch_query(p) if isinstance(p, models.Prefetch) else p - for p in prefetch - ] - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - if isinstance(search_params, models.SearchParams): - search_params = RestToGrpc.convert_search_params(search_params) - if isinstance(with_payload, 
get_args_subscribed(models.WithPayloadInterface)): - with_payload = RestToGrpc.convert_with_payload_interface(with_payload) - if isinstance(with_vectors, get_args_subscribed(models.WithVector)): - with_vectors = RestToGrpc.convert_with_vectors(with_vectors) - if isinstance(with_lookup, models.WithLookup): - with_lookup = RestToGrpc.convert_with_lookup(with_lookup) - if isinstance(with_lookup, str): - with_lookup = grpc.WithLookup(collection=with_lookup) - if isinstance(lookup_from, models.LookupLocation): - lookup_from = RestToGrpc.convert_lookup_location(lookup_from) - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - result: grpc.QueryGroupsResponse = ( - await self.grpc_points.QueryGroups( - grpc.QueryPointGroups( - collection_name=collection_name, - query=query, - prefetch=prefetch, - filter=query_filter, - limit=limit, - with_vectors=with_vectors, - with_payload=with_payload, - params=search_params, - score_threshold=score_threshold, - using=using, - group_by=group_by, - group_size=group_size, - with_lookup=with_lookup, - lookup_from=lookup_from, - timeout=timeout, - shard_key_selector=shard_key_selector, - read_consistency=consistency, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - ).result - return GrpcToRest.convert_groups_result(result) - else: - if isinstance(query, grpc.Query): - query = GrpcToRest.convert_query(query) - if isinstance(prefetch, grpc.PrefetchQuery): - prefetch = GrpcToRest.convert_prefetch_query(prefetch) - if isinstance(prefetch, list): - prefetch = [ - GrpcToRest.convert_prefetch_query(p) - if isinstance(p, grpc.PrefetchQuery) - else p - for p in prefetch - ] - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - if isinstance(search_params, grpc.SearchParams): - search_params = GrpcToRest.convert_search_params(search_params) - if isinstance(with_payload, grpc.WithPayloadSelector): - with_payload = GrpcToRest.convert_with_payload_selector(with_payload) - if isinstance(with_lookup, grpc.WithLookup): - with_lookup = GrpcToRest.convert_with_lookup(with_lookup) - if isinstance(lookup_from, grpc.LookupLocation): - lookup_from = GrpcToRest.convert_lookup_location(lookup_from) - query_request = models.QueryGroupsRequest( - shard_key=shard_key_selector, - prefetch=prefetch, - query=query, - using=using, - filter=query_filter, - params=search_params, - score_threshold=score_threshold, - limit=limit, - group_by=group_by, - group_size=group_size, - with_vector=with_vectors, - with_payload=with_payload, - with_lookup=with_lookup, - lookup_from=lookup_from, - ) - query_result = await self.http.search_api.query_points_groups( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - query_groups_request=query_request, - ) - assert query_result is not None, "Query points groups API returned None" - return query_result.result - - async def search_groups( - self, - collection_name: str, - query_vector: Union[ - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - types.NumpyArray, - ], - group_by: str, - query_filter: Optional[models.Filter] = None, - search_params: Optional[models.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - with_payload: Union[bool, Sequence[str], 
models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.GroupsResult: - if self._prefer_grpc: - vector_name = None - sparse_indices = None - if isinstance(with_lookup, models.WithLookup): - with_lookup = RestToGrpc.convert_with_lookup(with_lookup) - if isinstance(with_lookup, str): - with_lookup = grpc.WithLookup(collection=with_lookup) - if isinstance(query_vector, types.NamedVector): - vector = query_vector.vector - vector_name = query_vector.name - elif isinstance(query_vector, types.NamedSparseVector): - vector_name = query_vector.name - sparse_indices = grpc.SparseIndices(data=query_vector.vector.indices) - vector = query_vector.vector.values - elif isinstance(query_vector, tuple): - vector_name = query_vector[0] - vector = query_vector[1] - else: - vector = list(query_vector) - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - if isinstance(search_params, models.SearchParams): - search_params = RestToGrpc.convert_search_params(search_params) - if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)): - with_payload = RestToGrpc.convert_with_payload_interface(with_payload) - if isinstance(with_vectors, get_args_subscribed(models.WithVector)): - with_vectors = RestToGrpc.convert_with_vectors(with_vectors) - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - result: grpc.GroupsResult = ( - await self.grpc_points.SearchGroups( - grpc.SearchPointGroups( - collection_name=collection_name, - vector=vector, - vector_name=vector_name, - filter=query_filter, - limit=limit, - group_size=group_size, - with_vectors=with_vectors, - with_payload=with_payload, - params=search_params, - score_threshold=score_threshold, - group_by=group_by, - read_consistency=consistency, - with_lookup=with_lookup, - timeout=timeout, - sparse_indices=sparse_indices, - shard_key_selector=shard_key_selector, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - ).result - return GrpcToRest.convert_groups_result(result) - else: - if isinstance(with_lookup, grpc.WithLookup): - with_lookup = GrpcToRest.convert_with_lookup(with_lookup) - if isinstance(query_vector, tuple): - query_vector = construct( - models.NamedVector, name=query_vector[0], vector=query_vector[1] - ) - if isinstance(query_vector, np.ndarray): - query_vector = query_vector.tolist() - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - if isinstance(search_params, grpc.SearchParams): - search_params = GrpcToRest.convert_search_params(search_params) - if isinstance(with_payload, grpc.WithPayloadSelector): - with_payload = GrpcToRest.convert_with_payload_selector(with_payload) - search_groups_request = construct( - models.SearchGroupsRequest, - vector=query_vector, - filter=query_filter, - params=search_params, - with_payload=with_payload, - with_vector=with_vectors, - score_threshold=score_threshold, - group_by=group_by, - group_size=group_size, - 
limit=limit, - with_lookup=with_lookup, - shard_key=shard_key_selector, - ) - return ( - await self.openapi_client.search_api.search_point_groups( - search_groups_request=search_groups_request, - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - ) - ).result - - async def search_matrix_pairs( - self, - collection_name: str, - query_filter: Optional[types.Filter] = None, - limit: int = 3, - sample: int = 10, - using: Optional[str] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.SearchMatrixPairsResponse: - if self._prefer_grpc: - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - response = await self.grpc_points.SearchMatrixPairs( - grpc.SearchMatrixPoints( - collection_name=collection_name, - filter=query_filter, - sample=sample, - limit=limit, - using=using, - timeout=timeout, - read_consistency=consistency, - shard_key_selector=shard_key_selector, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - return GrpcToRest.convert_search_matrix_pairs(response.result) - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - search_matrix_result = ( - await self.openapi_client.search_api.search_matrix_pairs( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - search_matrix_request=models.SearchMatrixRequest( - shard_key=shard_key_selector, - limit=limit, - sample=sample, - using=using, - filter=query_filter, - ), - ) - ).result - assert search_matrix_result is not None, "Search matrix pairs returned None result" - return search_matrix_result - - async def search_matrix_offsets( - self, - collection_name: str, - query_filter: Optional[types.Filter] = None, - limit: int = 3, - sample: int = 10, - using: Optional[str] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.SearchMatrixOffsetsResponse: - if self._prefer_grpc: - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - response = await self.grpc_points.SearchMatrixOffsets( - grpc.SearchMatrixPoints( - collection_name=collection_name, - filter=query_filter, - sample=sample, - limit=limit, - using=using, - timeout=timeout, - read_consistency=consistency, - shard_key_selector=shard_key_selector, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - return GrpcToRest.convert_search_matrix_offsets(response.result) - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - search_matrix_result = ( - await self.openapi_client.search_api.search_matrix_offsets( - 
collection_name=collection_name, - consistency=consistency, - timeout=timeout, - search_matrix_request=models.SearchMatrixRequest( - shard_key=shard_key_selector, - limit=limit, - sample=sample, - using=using, - filter=query_filter, - ), - ) - ).result - assert search_matrix_result is not None, "Search matrix offsets returned None result" - return search_matrix_result - - async def recommend_batch( - self, - collection_name: str, - requests: Sequence[types.RecommendRequest], - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - if self._prefer_grpc: - requests = [ - RestToGrpc.convert_recommend_request(r, collection_name) - if isinstance(r, models.RecommendRequest) - else r - for r in requests - ] - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - grpc_res: grpc.SearchBatchResponse = await self.grpc_points.RecommendBatch( - grpc.RecommendBatchPoints( - collection_name=collection_name, - recommend_points=requests, - read_consistency=consistency, - timeout=timeout, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - return [ - [GrpcToRest.convert_scored_point(hit) for hit in r.result] for r in grpc_res.result - ] - else: - requests = [ - GrpcToRest.convert_recommend_points(r) - if isinstance(r, grpc.RecommendPoints) - else r - for r in requests - ] - http_res: list[list[models.ScoredPoint]] = ( - await self.http.search_api.recommend_batch_points( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - recommend_request_batch=models.RecommendRequestBatch(searches=requests), - ) - ).result - return http_res - - async def recommend( - self, - collection_name: str, - positive: Optional[Sequence[types.RecommendExample]] = None, - negative: Optional[Sequence[types.RecommendExample]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - score_threshold: Optional[float] = None, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - strategy: Optional[types.RecommendStrategy] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - if positive is None: - positive = [] - if negative is None: - negative = [] - if self._prefer_grpc: - positive_ids = RestToGrpc.convert_recommend_examples_to_ids(positive) - positive_vectors = RestToGrpc.convert_recommend_examples_to_vectors(positive) - negative_ids = RestToGrpc.convert_recommend_examples_to_ids(negative) - negative_vectors = RestToGrpc.convert_recommend_examples_to_vectors(negative) - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - if isinstance(search_params, models.SearchParams): - search_params = RestToGrpc.convert_search_params(search_params) - if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)): - with_payload = RestToGrpc.convert_with_payload_interface(with_payload) - if isinstance(with_vectors, get_args_subscribed(models.WithVector)): - with_vectors = RestToGrpc.convert_with_vectors(with_vectors) - if isinstance(lookup_from, models.LookupLocation): 
- lookup_from = RestToGrpc.convert_lookup_location(lookup_from) - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - if isinstance(strategy, (str, models.RecommendStrategy)): - strategy = RestToGrpc.convert_recommend_strategy(strategy) - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - res: grpc.SearchResponse = await self.grpc_points.Recommend( - grpc.RecommendPoints( - collection_name=collection_name, - positive=positive_ids, - negative=negative_ids, - filter=query_filter, - limit=limit, - offset=offset, - with_vectors=with_vectors, - with_payload=with_payload, - params=search_params, - score_threshold=score_threshold, - using=using, - lookup_from=lookup_from, - read_consistency=consistency, - strategy=strategy, - positive_vectors=positive_vectors, - negative_vectors=negative_vectors, - shard_key_selector=shard_key_selector, - timeout=timeout, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - return [GrpcToRest.convert_scored_point(hit) for hit in res.result] - else: - positive = [ - GrpcToRest.convert_point_id(example) - if isinstance(example, grpc.PointId) - else example - for example in positive - ] - negative = [ - GrpcToRest.convert_point_id(example) - if isinstance(example, grpc.PointId) - else example - for example in negative - ] - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - if isinstance(search_params, grpc.SearchParams): - search_params = GrpcToRest.convert_search_params(search_params) - if isinstance(with_payload, grpc.WithPayloadSelector): - with_payload = GrpcToRest.convert_with_payload_selector(with_payload) - if isinstance(lookup_from, grpc.LookupLocation): - lookup_from = GrpcToRest.convert_lookup_location(lookup_from) - result = ( - await self.openapi_client.search_api.recommend_points( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - recommend_request=models.RecommendRequest( - filter=query_filter, - positive=positive, - negative=negative, - params=search_params, - limit=limit, - offset=offset, - with_payload=with_payload, - with_vector=with_vectors, - score_threshold=score_threshold, - lookup_from=lookup_from, - using=using, - strategy=strategy, - shard_key=shard_key_selector, - ), - ) - ).result - assert result is not None, "Recommend points API returned None" - return result - - async def recommend_groups( - self, - collection_name: str, - group_by: str, - positive: Optional[Sequence[Union[types.PointId, list[float]]]] = None, - negative: Optional[Sequence[Union[types.PointId, list[float]]]] = None, - query_filter: Optional[models.Filter] = None, - search_params: Optional[models.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - score_threshold: Optional[float] = None, - with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[models.LookupLocation] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - strategy: Optional[types.RecommendStrategy] = None, + lookup_from: Optional[types.LookupLocation] = None, consistency: Optional[types.ReadConsistency] = None, shard_key_selector: Optional[types.ShardKeySelector] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> types.GroupsResult: - positive = 
positive if positive is not None else [] - negative = negative if negative is not None else [] if self._prefer_grpc: - if isinstance(with_lookup, models.WithLookup): - with_lookup = RestToGrpc.convert_with_lookup(with_lookup) - if isinstance(with_lookup, str): - with_lookup = grpc.WithLookup(collection=with_lookup) - positive_ids = RestToGrpc.convert_recommend_examples_to_ids(positive) - positive_vectors = RestToGrpc.convert_recommend_examples_to_vectors(positive) - negative_ids = RestToGrpc.convert_recommend_examples_to_ids(negative) - negative_vectors = RestToGrpc.convert_recommend_examples_to_vectors(negative) + if query is not None: + query = RestToGrpc.convert_query(query) + if isinstance(prefetch, models.Prefetch): + prefetch = [RestToGrpc.convert_prefetch_query(prefetch)] + if isinstance(prefetch, list): + prefetch = [ + RestToGrpc.convert_prefetch_query(p) if isinstance(p, models.Prefetch) else p + for p in prefetch + ] if isinstance(query_filter, models.Filter): query_filter = RestToGrpc.convert_filter(model=query_filter) if isinstance(search_params, models.SearchParams): @@ -1252,58 +577,42 @@ async def recommend_groups( with_payload = RestToGrpc.convert_with_payload_interface(with_payload) if isinstance(with_vectors, get_args_subscribed(models.WithVector)): with_vectors = RestToGrpc.convert_with_vectors(with_vectors) + if isinstance(with_lookup, models.WithLookup): + with_lookup = RestToGrpc.convert_with_lookup(with_lookup) + if isinstance(with_lookup, str): + with_lookup = grpc.WithLookup(collection=with_lookup) if isinstance(lookup_from, models.LookupLocation): lookup_from = RestToGrpc.convert_lookup_location(lookup_from) if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): consistency = RestToGrpc.convert_read_consistency(consistency) - if isinstance(strategy, (str, models.RecommendStrategy)): - strategy = RestToGrpc.convert_recommend_strategy(strategy) if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - res: grpc.GroupsResult = ( - await self.grpc_points.RecommendGroups( - grpc.RecommendPointGroups( + result: grpc.QueryGroupsResponse = ( + await self.grpc_points.QueryGroups( + grpc.QueryPointGroups( collection_name=collection_name, - positive=positive_ids, - negative=negative_ids, + query=query, + prefetch=prefetch, filter=query_filter, - group_by=group_by, limit=limit, - group_size=group_size, with_vectors=with_vectors, with_payload=with_payload, params=search_params, score_threshold=score_threshold, using=using, - lookup_from=lookup_from, - read_consistency=consistency, + group_by=group_by, + group_size=group_size, with_lookup=with_lookup, - strategy=strategy, - positive_vectors=positive_vectors, - negative_vectors=negative_vectors, - shard_key_selector=shard_key_selector, + lookup_from=lookup_from, timeout=timeout, + shard_key_selector=shard_key_selector, + read_consistency=consistency, ), timeout=timeout if timeout is not None else self._timeout, ) ).result - assert res is not None, "Recommend groups API returned None" - return GrpcToRest.convert_groups_result(res) + return GrpcToRest.convert_groups_result(result) else: - if isinstance(with_lookup, grpc.WithLookup): - with_lookup = GrpcToRest.convert_with_lookup(with_lookup) - positive = [ - GrpcToRest.convert_point_id(point_id) - if isinstance(point_id, grpc.PointId) - else point_id - for point_id in positive - ] - negative = [ - GrpcToRest.convert_point_id(point_id) - if isinstance(point_id, 
grpc.PointId) - else point_id - for point_id in negative - ] if isinstance(query_filter, grpc.Filter): query_filter = GrpcToRest.convert_filter(model=query_filter) if isinstance(search_params, grpc.SearchParams): @@ -1312,185 +621,134 @@ async def recommend_groups( with_payload = GrpcToRest.convert_with_payload_selector(with_payload) if isinstance(lookup_from, grpc.LookupLocation): lookup_from = GrpcToRest.convert_lookup_location(lookup_from) - result = ( - await self.openapi_client.search_api.recommend_point_groups( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - recommend_groups_request=construct( - models.RecommendGroupsRequest, - positive=positive, - negative=negative, - filter=query_filter, - group_by=group_by, - limit=limit, - group_size=group_size, - params=search_params, - with_payload=with_payload, - with_vector=with_vectors, - score_threshold=score_threshold, - lookup_from=lookup_from, - using=using, - with_lookup=with_lookup, - strategy=strategy, - shard_key=shard_key_selector, - ), - ) - ).result - assert result is not None, "Recommend points API returned None" - return result + query_request = models.QueryGroupsRequest( + shard_key=shard_key_selector, + prefetch=prefetch, + query=query, + using=using, + filter=query_filter, + params=search_params, + score_threshold=score_threshold, + limit=limit, + group_by=group_by, + group_size=group_size, + with_vector=with_vectors, + with_payload=with_payload, + with_lookup=with_lookup, + lookup_from=lookup_from, + ) + query_result = await self.http.search_api.query_points_groups( + collection_name=collection_name, + consistency=consistency, + timeout=timeout, + query_groups_request=query_request, + ) + assert query_result is not None, "Query points groups API returned None" + return query_result.result - async def discover( + async def search_matrix_pairs( self, collection_name: str, - target: Optional[types.TargetVector] = None, - context: Optional[Sequence[types.ContextExamplePair]] = None, query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, + limit: int = 3, + sample: int = 10, using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, consistency: Optional[types.ReadConsistency] = None, shard_key_selector: Optional[types.ShardKeySelector] = None, timeout: Optional[int] = None, **kwargs: Any, - ) -> list[types.ScoredPoint]: - if context is None: - context = [] + ) -> types.SearchMatrixPairsResponse: if self._prefer_grpc: - target = ( - RestToGrpc.convert_target_vector(target) - if target is not None - and isinstance(target, get_args_subscribed(models.RecommendExample)) - else target - ) - context = [ - RestToGrpc.convert_context_example_pair(pair) - if isinstance(pair, models.ContextExamplePair) - else pair - for pair in context - ] if isinstance(query_filter, models.Filter): query_filter = RestToGrpc.convert_filter(model=query_filter) - if isinstance(search_params, models.SearchParams): - search_params = RestToGrpc.convert_search_params(search_params) - if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)): - with_payload = RestToGrpc.convert_with_payload_interface(with_payload) - if isinstance(with_vectors, get_args_subscribed(models.WithVector)): - with_vectors = RestToGrpc.convert_with_vectors(with_vectors) - if isinstance(lookup_from, 
models.LookupLocation): - lookup_from = RestToGrpc.convert_lookup_location(lookup_from) - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - res: grpc.SearchResponse = await self.grpc_points.Discover( - grpc.DiscoverPoints( + if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): + consistency = RestToGrpc.convert_read_consistency(consistency) + response = await self.grpc_points.SearchMatrixPairs( + grpc.SearchMatrixPoints( collection_name=collection_name, - target=target, - context=context, filter=query_filter, + sample=sample, limit=limit, - offset=offset, - with_vectors=with_vectors, - with_payload=with_payload, - params=search_params, using=using, - lookup_from=lookup_from, + timeout=timeout, read_consistency=consistency, shard_key_selector=shard_key_selector, - timeout=timeout, ), timeout=timeout if timeout is not None else self._timeout, ) - return [GrpcToRest.convert_scored_point(hit) for hit in res.result] - else: - target = ( - GrpcToRest.convert_target_vector(target) - if target is not None and isinstance(target, grpc.TargetVector) - else target + return GrpcToRest.convert_search_matrix_pairs(response.result) + if isinstance(query_filter, grpc.Filter): + query_filter = GrpcToRest.convert_filter(model=query_filter) + search_matrix_result = ( + await self.openapi_client.search_api.search_matrix_pairs( + collection_name=collection_name, + consistency=consistency, + timeout=timeout, + search_matrix_request=models.SearchMatrixRequest( + shard_key=shard_key_selector, + limit=limit, + sample=sample, + using=using, + filter=query_filter, + ), ) - context = [ - GrpcToRest.convert_context_example_pair(pair) - if isinstance(pair, grpc.ContextExamplePair) - else pair - for pair in context - ] - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - if isinstance(search_params, grpc.SearchParams): - search_params = GrpcToRest.convert_search_params(search_params) - if isinstance(with_payload, grpc.WithPayloadSelector): - with_payload = GrpcToRest.convert_with_payload_selector(with_payload) - if isinstance(lookup_from, grpc.LookupLocation): - lookup_from = GrpcToRest.convert_lookup_location(lookup_from) - result = ( - await self.openapi_client.search_api.discover_points( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - discover_request=models.DiscoverRequest( - target=target, - context=context, - filter=query_filter, - params=search_params, - limit=limit, - offset=offset, - with_payload=with_payload, - with_vector=with_vectors, - lookup_from=lookup_from, - using=using, - shard_key=shard_key_selector, - ), - ) - ).result - assert result is not None, "Discover points API returned None" - return result + ).result + assert search_matrix_result is not None, "Search matrix pairs returned None result" + return search_matrix_result - async def discover_batch( + async def search_matrix_offsets( self, collection_name: str, - requests: Sequence[types.DiscoverRequest], + query_filter: Optional[types.Filter] = None, + limit: int = 3, + sample: int = 10, + using: Optional[str] = None, consistency: Optional[types.ReadConsistency] = None, + shard_key_selector: Optional[types.ShardKeySelector] = None, timeout: Optional[int] = None, **kwargs: Any, - ) -> 
list[list[types.ScoredPoint]]: + ) -> types.SearchMatrixOffsetsResponse: if self._prefer_grpc: - requests = [ - RestToGrpc.convert_discover_request(r, collection_name) - if isinstance(r, models.DiscoverRequest) - else r - for r in requests - ] - grpc_res: grpc.SearchBatchResponse = await self.grpc_points.DiscoverBatch( - grpc.DiscoverBatchPoints( + if isinstance(query_filter, models.Filter): + query_filter = RestToGrpc.convert_filter(model=query_filter) + if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): + shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) + if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): + consistency = RestToGrpc.convert_read_consistency(consistency) + response = await self.grpc_points.SearchMatrixOffsets( + grpc.SearchMatrixPoints( collection_name=collection_name, - discover_points=requests, - read_consistency=consistency, + filter=query_filter, + sample=sample, + limit=limit, + using=using, timeout=timeout, + read_consistency=consistency, + shard_key_selector=shard_key_selector, ), timeout=timeout if timeout is not None else self._timeout, ) - return [ - [GrpcToRest.convert_scored_point(hit) for hit in r.result] for r in grpc_res.result - ] - else: - requests = [ - GrpcToRest.convert_discover_points(r) if isinstance(r, grpc.DiscoverPoints) else r - for r in requests - ] - http_res: list[list[models.ScoredPoint]] = ( - await self.http.search_api.discover_batch_points( - collection_name=collection_name, - discover_request_batch=models.DiscoverRequestBatch(searches=requests), - consistency=consistency, - timeout=timeout, - ) - ).result - return http_res + return GrpcToRest.convert_search_matrix_offsets(response.result) + if isinstance(query_filter, grpc.Filter): + query_filter = GrpcToRest.convert_filter(model=query_filter) + search_matrix_result = ( + await self.openapi_client.search_api.search_matrix_offsets( + collection_name=collection_name, + consistency=consistency, + timeout=timeout, + search_matrix_request=models.SearchMatrixRequest( + shard_key=shard_key_selector, + limit=limit, + sample=sample, + using=using, + filter=query_filter, + ), + ) + ).result + assert search_matrix_result is not None, "Search matrix offsets returned None result" + return search_matrix_result async def scroll( self, @@ -1577,6 +835,7 @@ async def count( exact: bool = True, shard_key_selector: Optional[types.ShardKeySelector] = None, timeout: Optional[int] = None, + consistency: Optional[types.ReadConsistency] = None, **kwargs: Any, ) -> types.CountResult: if self._prefer_grpc: @@ -1584,6 +843,8 @@ async def count( count_filter = RestToGrpc.convert_filter(model=count_filter) if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) + if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): + consistency = RestToGrpc.convert_read_consistency(consistency) response = ( await self.grpc_points.Count( grpc.CountPoints( @@ -1592,6 +853,7 @@ async def count( exact=exact, shard_key_selector=shard_key_selector, timeout=timeout, + read_consistency=consistency, ), timeout=timeout if timeout is not None else self._timeout, ) @@ -1605,6 +867,7 @@ async def count( count_request=models.CountRequest( filter=count_filter, exact=exact, shard_key=shard_key_selector ), + consistency=consistency, timeout=timeout, ) ).result @@ -1672,6 +935,7 @@ async def upsert( wait: bool = True, ordering: 
Optional[types.WriteOrdering] = None, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> types.UpdateResult: if self._prefer_grpc: @@ -1700,6 +964,8 @@ async def upsert( ordering = RestToGrpc.convert_write_ordering(ordering) if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) + if isinstance(update_filter, models.Filter): + update_filter = RestToGrpc.convert_filter(model=update_filter) grpc_result = ( await self.grpc_points.Upsert( grpc.UpsertPoints( @@ -1708,6 +974,7 @@ async def upsert( points=points, ordering=ordering, shard_key_selector=shard_key_selector, + update_filter=update_filter, ), timeout=self._timeout, ) @@ -1715,6 +982,8 @@ async def upsert( assert grpc_result is not None, "Upsert returned None result" return GrpcToRest.convert_update_result(grpc_result) else: + if isinstance(update_filter, grpc.Filter): + update_filter = GrpcToRest.convert_filter(model=update_filter) if isinstance(points, list): points = [ GrpcToRest.convert_point_struct(point) @@ -1722,9 +991,13 @@ async def upsert( else point for point in points ] - points = models.PointsList(points=points, shard_key=shard_key_selector) + points = models.PointsList( + points=points, shard_key=shard_key_selector, update_filter=update_filter + ) if isinstance(points, models.Batch): - points = models.PointsBatch(batch=points, shard_key=shard_key_selector) + points = models.PointsBatch( + batch=points, shard_key=shard_key_selector, update_filter=update_filter + ) http_result = ( await self.openapi_client.points_api.upsert_points( collection_name=collection_name, @@ -1743,6 +1016,7 @@ async def update_vectors( wait: bool = True, ordering: Optional[types.WriteOrdering] = None, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> types.UpdateResult: if self._prefer_grpc: @@ -1751,6 +1025,8 @@ async def update_vectors( ordering = RestToGrpc.convert_write_ordering(ordering) if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) + if isinstance(update_filter, models.Filter): + update_filter = RestToGrpc.convert_filter(model=update_filter) grpc_result = ( await self.grpc_points.UpdateVectors( grpc.UpdatePointVectors( @@ -1759,6 +1035,7 @@ async def update_vectors( points=points, ordering=ordering, shard_key_selector=shard_key_selector, + update_filter=update_filter, ), timeout=self._timeout, ) @@ -1766,12 +1043,14 @@ async def update_vectors( assert grpc_result is not None, "Upsert returned None result" return GrpcToRest.convert_update_result(grpc_result) else: + if isinstance(update_filter, grpc.Filter): + update_filter = GrpcToRest.convert_filter(model=update_filter) return ( await self.openapi_client.points_api.update_vectors( collection_name=collection_name, wait=wait, update_vectors=models.UpdateVectors( - points=points, shard_key=shard_key_selector + points=points, shard_key=shard_key_selector, update_filter=update_filter ), ordering=ordering, ) @@ -2414,6 +1693,7 @@ async def update_collection( timeout: Optional[int] = None, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, strict_mode_config: Optional[types.StrictModeConfig] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: if self._prefer_grpc: @@ -2435,6 
+1715,8 @@ async def update_collection( ) if isinstance(strict_mode_config, models.StrictModeConfig): strict_mode_config = RestToGrpc.convert_strict_mode_config(strict_mode_config) + if isinstance(metadata, dict): + metadata = RestToGrpc.convert_payload(metadata) return ( await self.grpc_collections.Update( grpc.UpdateCollection( @@ -2447,6 +1729,7 @@ async def update_collection( sparse_vectors_config=sparse_vectors_config, strict_mode_config=strict_mode_config, timeout=timeout, + metadata=metadata, ), timeout=timeout if timeout is not None else self._timeout, ) @@ -2472,6 +1755,7 @@ async def update_collection( quantization_config=quantization_config, sparse_vectors=sparse_vectors_config, strict_mode_config=strict_mode_config, + metadata=metadata, ), timeout=timeout, ) @@ -2509,20 +1793,13 @@ async def create_collection( optimizers_config: Optional[types.OptimizersConfigDiff] = None, wal_config: Optional[types.WalConfigDiff] = None, quantization_config: Optional[types.QuantizationConfig] = None, - init_from: Optional[types.InitFrom] = None, timeout: Optional[int] = None, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, sharding_method: Optional[types.ShardingMethod] = None, strict_mode_config: Optional[types.StrictModeConfig] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: - if init_from is not None: - show_warning_once( - message="init_from is deprecated", - category=DeprecationWarning, - stacklevel=5, - idx="create-collection-init-from", - ) if self._prefer_grpc: if isinstance(vectors_config, (models.VectorParams, dict)): vectors_config = RestToGrpc.convert_vectors_config(vectors_config) @@ -2534,8 +1811,6 @@ async def create_collection( wal_config = RestToGrpc.convert_wal_config_diff(wal_config) if isinstance(quantization_config, get_args(models.QuantizationConfig)): quantization_config = RestToGrpc.convert_quantization_config(quantization_config) - if isinstance(init_from, models.InitFrom): - init_from = RestToGrpc.convert_init_from(init_from) if isinstance(sparse_vectors_config, dict): sparse_vectors_config = RestToGrpc.convert_sparse_vector_config( sparse_vectors_config @@ -2544,6 +1819,8 @@ async def create_collection( sharding_method = RestToGrpc.convert_sharding_method(sharding_method) if isinstance(strict_mode_config, models.StrictModeConfig): strict_mode_config = RestToGrpc.convert_strict_mode_config(strict_mode_config) + if isinstance(metadata, dict): + metadata = RestToGrpc.convert_payload(metadata) create_collection = grpc.CreateCollection( collection_name=collection_name, hnsw_config=hnsw_config, @@ -2555,11 +1832,11 @@ async def create_collection( vectors_config=vectors_config, replication_factor=replication_factor, write_consistency_factor=write_consistency_factor, - init_from_collection=init_from, quantization_config=quantization_config, sparse_vectors_config=sparse_vectors_config, sharding_method=sharding_method, strict_mode_config=strict_mode_config, + metadata=metadata, ) return ( await self.grpc_collections.Create(create_collection, timeout=self._timeout) @@ -2572,8 +1849,6 @@ async def create_collection( wal_config = GrpcToRest.convert_wal_config_diff(wal_config) if isinstance(quantization_config, grpc.QuantizationConfig): quantization_config = GrpcToRest.convert_quantization_config(quantization_config) - if isinstance(init_from, str): - init_from = GrpcToRest.convert_init_from(init_from) create_collection_request = models.CreateCollection( vectors=vectors_config, shard_number=shard_number, @@ -2584,10 
+1859,10 @@ async def create_collection( optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, - init_from=init_from, sparse_vectors=sparse_vectors_config, sharding_method=sharding_method, strict_mode_config=strict_mode_config, + metadata=metadata, ) result: Optional[bool] = ( await self.http.collections_api.create_collection( @@ -2611,11 +1886,11 @@ async def recreate_collection( optimizers_config: Optional[types.OptimizersConfigDiff] = None, wal_config: Optional[types.WalConfigDiff] = None, quantization_config: Optional[types.QuantizationConfig] = None, - init_from: Optional[types.InitFrom] = None, timeout: Optional[int] = None, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, sharding_method: Optional[types.ShardingMethod] = None, strict_mode_config: Optional[types.StrictModeConfig] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: await self.delete_collection(collection_name, timeout=timeout) @@ -2630,11 +1905,11 @@ async def recreate_collection( optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, - init_from=init_from, timeout=timeout, sparse_vectors_config=sparse_vectors_config, sharding_method=sharding_method, strict_mode_config=strict_mode_config, + metadata=metadata, ) @property @@ -2653,6 +1928,7 @@ def _upload_collection( method: Optional[str] = None, wait: bool = False, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, ) -> None: if method is not None: if method in get_all_start_methods(): @@ -2675,6 +1951,7 @@ def _upload_collection( "shard_key_selector": shard_key_selector, "options": self._grpc_options, "timeout": self._timeout, + "update_filter": update_filter, } else: updater_kwargs = { @@ -2683,6 +1960,7 @@ def _upload_collection( "max_retries": max_retries, "wait": wait, "shard_key_selector": shard_key_selector, + "update_filter": update_filter, **self._rest_args, } if parallel == 1: @@ -2694,31 +1972,6 @@ def _upload_collection( for _ in pool.unordered_map(batches_iterator, **updater_kwargs): pass - def upload_records( - self, - collection_name: str, - records: Iterable[types.Record], - batch_size: int = 64, - parallel: int = 1, - method: Optional[str] = None, - max_retries: int = 3, - wait: bool = False, - shard_key_selector: Optional[types.ShardKeySelector] = None, - **kwargs: Any, - ) -> None: - batches_iterator = self._updater_class.iterate_records_batches( - records=records, batch_size=batch_size - ) - self._upload_collection( - batches_iterator=batches_iterator, - collection_name=collection_name, - max_retries=max_retries, - parallel=parallel, - method=method, - shard_key_selector=shard_key_selector, - wait=wait, - ) - def upload_points( self, collection_name: str, @@ -2729,6 +1982,7 @@ def upload_points( max_retries: int = 3, wait: bool = False, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> None: batches_iterator = self._updater_class.iterate_records_batches( @@ -2742,6 +1996,7 @@ def upload_points( method=method, wait=wait, shard_key_selector=shard_key_selector, + update_filter=update_filter, ) def upload_collection( @@ -2758,6 +2013,7 @@ def upload_collection( max_retries: int = 3, wait: bool = False, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> None: batches_iterator = 
self._updater_class.iterate_batches( @@ -2771,6 +2027,7 @@ def upload_collection( method=method, wait=wait, shard_key_selector=shard_key_selector, + update_filter=update_filter, ) async def create_payload_index( @@ -3048,29 +2305,6 @@ async def recover_shard_snapshot( ) ).result - async def lock_storage(self, reason: str, **kwargs: Any) -> types.LocksOption: - result: Optional[types.LocksOption] = ( - await self.openapi_client.service_api.post_locks( - models.LocksOption(error_message=reason, write=True) - ) - ).result - assert result is not None, "Lock storage returned None" - return result - - async def unlock_storage(self, **kwargs: Any) -> types.LocksOption: - result: Optional[types.LocksOption] = ( - await self.openapi_client.service_api.post_locks(models.LocksOption(write=False)) - ).result - assert result is not None, "Post locks returned None" - return result - - async def get_locks(self, **kwargs: Any) -> types.LocksOption: - result: Optional[types.LocksOption] = ( - await self.openapi_client.service_api.get_locks() - ).result - assert result is not None, "Get locks returned None" - return result - async def create_shard_key( self, collection_name: str, @@ -3078,12 +2312,15 @@ async def create_shard_key( shards_number: Optional[int] = None, replication_factor: Optional[int] = None, placement: Optional[list[int]] = None, + initial_state: Optional[types.ReplicaState] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> bool: if self._prefer_grpc: if isinstance(shard_key, get_args_subscribed(models.ShardKey)): shard_key = RestToGrpc.convert_shard_key(shard_key) + if isinstance(initial_state, models.ReplicaState): + initial_state = RestToGrpc.convert_replica_state(initial_state) return ( await self.grpc_collections.CreateShardKey( grpc.CreateShardKeyRequest( @@ -3094,6 +2331,7 @@ async def create_shard_key( shards_number=shards_number, replication_factor=replication_factor, placement=placement or [], + initial_state=initial_state, ), ), timeout=timeout if timeout is not None else self._timeout, @@ -3109,6 +2347,7 @@ async def create_shard_key( shards_number=shards_number, replication_factor=replication_factor, placement=placement, + initial_state=initial_state, ), ) ).result diff --git a/qdrant_client/client_base.py b/qdrant_client/client_base.py index eb121af1..2f533d86 100644 --- a/qdrant_client/client_base.py +++ b/qdrant_client/client_base.py @@ -1,65 +1,12 @@ from typing import Any, Iterable, Mapping, Optional, Sequence, Union from qdrant_client.conversions import common_types as types -from qdrant_client.http import models class QdrantBase: def __init__(self, **kwargs: Any): pass - def search_batch( - self, - collection_name: str, - requests: Sequence[types.SearchRequest], - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - raise NotImplementedError() - - def search( - self, - collection_name: str, - query_vector: Union[ - types.NumpyArray, - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - ], - query_filter: Optional[models.Filter] = None, - search_params: Optional[models.SearchParams] = None, - limit: int = 10, - offset: Optional[int] = None, - with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - raise NotImplementedError() - - def search_groups( - self, - collection_name: str, - query_vector: Union[ - types.NumpyArray, - Sequence[float], - tuple[str, 
list[float]], - types.NamedVector, - types.NamedSparseVector, - ], - group_by: str, - query_filter: Optional[models.Filter] = None, - search_params: Optional[models.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - **kwargs: Any, - ) -> types.GroupsResult: - raise NotImplementedError() - def search_matrix_offsets( self, collection_name: str, @@ -150,80 +97,6 @@ def query_points_groups( ) -> types.GroupsResult: raise NotImplementedError() - def recommend_batch( - self, - collection_name: str, - requests: Sequence[types.RecommendRequest], - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - raise NotImplementedError() - - def recommend( - self, - collection_name: str, - positive: Optional[Sequence[types.RecommendExample]] = None, - negative: Optional[Sequence[types.RecommendExample]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - score_threshold: Optional[float] = None, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - strategy: Optional[types.RecommendStrategy] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - raise NotImplementedError() - - def recommend_groups( - self, - collection_name: str, - group_by: str, - positive: Optional[Sequence[types.RecommendExample]] = None, - negative: Optional[Sequence[types.RecommendExample]] = None, - query_filter: Optional[models.Filter] = None, - search_params: Optional[models.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - score_threshold: Optional[float] = None, - with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[models.LookupLocation] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - strategy: Optional[types.RecommendStrategy] = None, - **kwargs: Any, - ) -> types.GroupsResult: - raise NotImplementedError() - - def discover( - self, - collection_name: str, - target: Optional[types.TargetVector] = None, - context: Optional[Sequence[types.ContextExamplePair]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - consistency: Optional[types.ReadConsistency] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - raise NotImplementedError() - - def discover_batch( - self, - collection_name: str, - requests: Sequence[types.DiscoverRequest], - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - raise NotImplementedError() - def scroll( self, collection_name: str, @@ -394,14 +267,6 @@ def recreate_collection( ) -> bool: raise NotImplementedError() - def upload_records( - self, - collection_name: str, - records: Iterable[types.Record], - **kwargs: Any, - ) -> None: - raise NotImplementedError() - def upload_points( self, collection_name: str, @@ -496,15 +361,6 @@ def recover_shard_snapshot( 
) -> Optional[bool]: raise NotImplementedError() - def lock_storage(self, reason: str, **kwargs: Any) -> types.LocksOption: - raise NotImplementedError() - - def unlock_storage(self, **kwargs: Any) -> types.LocksOption: - raise NotImplementedError() - - def get_locks(self, **kwargs: Any) -> types.LocksOption: - raise NotImplementedError() - def close(self, **kwargs: Any) -> None: pass diff --git a/qdrant_client/conversions/common_types.py b/qdrant_client/conversions/common_types.py index e251d239..a912f285 100644 --- a/qdrant_client/conversions/common_types.py +++ b/qdrant_client/conversions/common_types.py @@ -9,6 +9,7 @@ from typing_extensions import TypeAlias from typing import Union, get_args, Sequence +from uuid import UUID from qdrant_client import grpc from qdrant_client.http import models as rest @@ -56,7 +57,7 @@ def get_args_subscribed(tp): # type: ignore CollectionParamsDiff = Union[rest.CollectionParamsDiff, grpc.CollectionParamsDiff] WalConfigDiff = Union[rest.WalConfigDiff, grpc.WalConfigDiff] QuantizationConfig = Union[rest.QuantizationConfig, grpc.QuantizationConfig] -PointId = Union[int, str, grpc.PointId] +PointId = Union[int, str, UUID, grpc.PointId] PayloadSchemaType = Union[ rest.PayloadSchemaType, rest.PayloadSchemaParams, @@ -75,9 +76,6 @@ def get_args_subscribed(tp): # type: ignore ] LookupLocation = Union[rest.LookupLocation, grpc.LookupLocation] RecommendStrategy: TypeAlias = rest.RecommendStrategy -RecommendExample: TypeAlias = rest.RecommendExample -TargetVector = Union[rest.RecommendExample, grpc.TargetVector] -ContextExamplePair = Union[rest.ContextExamplePair, grpc.ContextExamplePair] OrderBy = Union[rest.OrderByInterface, grpc.OrderBy] ShardingMethod: TypeAlias = rest.ShardingMethod ShardKey: TypeAlias = rest.ShardKey @@ -107,10 +105,8 @@ def get_args_subscribed(tp): # type: ignore VectorStruct: TypeAlias = rest.VectorStruct VectorParams: TypeAlias = rest.VectorParams SparseVectorParams: TypeAlias = rest.SparseVectorParams -LocksOption: TypeAlias = rest.LocksOption SnapshotPriority: TypeAlias = rest.SnapshotPriority CollectionsAliasesResponse: TypeAlias = rest.CollectionsAliasesResponse -InitFrom: TypeAlias = Union[rest.InitFrom, str] UpdateOperation: TypeAlias = rest.UpdateOperation Query: TypeAlias = rest.Query Prefetch: TypeAlias = rest.Prefetch @@ -119,10 +115,7 @@ def get_args_subscribed(tp): # type: ignore InferenceObject: TypeAlias = rest.InferenceObject StrictModeConfig: TypeAlias = rest.StrictModeConfig -SearchRequest = Union[rest.SearchRequest, grpc.SearchPoints] -RecommendRequest = Union[rest.RecommendRequest, grpc.RecommendPoints] -DiscoverRequest = Union[rest.DiscoverRequest, grpc.DiscoverPoints] -QueryRequest = Union[rest.QueryRequest, grpc.QueryPoints] +QueryRequest: TypeAlias = rest.QueryRequest Mmr: TypeAlias = rest.Mmr @@ -142,6 +135,7 @@ def get_args_subscribed(tp): # type: ignore VersionInfo: TypeAlias = rest.VersionInfo +ReplicaState: TypeAlias = rest.ReplicaState # we can't use `nptyping` package due to numpy/python-version incompatibilities # thus we need to define precise type annotations while we support python3.7 _np_numeric = Union[ diff --git a/qdrant_client/conversions/conversion.py b/qdrant_client/conversions/conversion.py index 3638526c..19c9cd31 100644 --- a/qdrant_client/conversions/conversion.py +++ b/qdrant_client/conversions/conversion.py @@ -1,3 +1,4 @@ +import uuid from datetime import date, datetime, timezone from typing import Any, Mapping, Optional, Sequence, Union, get_args @@ -236,7 +237,6 @@ def 
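
# [editor's note] The search/recommend/discover methods removed from the base
# classes above are covered by the retained query_points() API. A hedged
# migration sketch; the "docs" collection and the example point ids are
# illustrative, not part of this patch.
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")

# search(query_vector=...)              -> query_points(query=<vector>)
hits = client.query_points(collection_name="docs", query=[0.1, 0.2, 0.3, 0.4], limit=10)

# recommend(positive=..., negative=...) -> RecommendQuery / RecommendInput
recs = client.query_points(
    collection_name="docs",
    query=models.RecommendQuery(recommend=models.RecommendInput(positive=[1], negative=[2])),
)

# discover(target=..., context=...)     -> DiscoverQuery / DiscoverInput
disc = client.query_points(
    collection_name="docs",
    query=models.DiscoverQuery(
        discover=models.DiscoverInput(
            target=1, context=[models.ContextPair(positive=2, negative=3)]
        )
    ),
)
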
convert_collection_info(cls, model: grpc.CollectionInfo) -> rest.CollectionI payload_schema=cls.convert_payload_schema(model.payload_schema), segments_count=model.segments_count, status=cls.convert_collection_status(model.status), - vectors_count=model.vectors_count if model.HasField("vectors_count") else None, points_count=model.points_count, indexed_vectors_count=model.indexed_vectors_count or 0, ) @@ -265,6 +265,7 @@ def convert_collection_config(cls, model: grpc.CollectionConfig) -> rest.Collect if model.HasField("strict_mode_config") else None ), + metadata=cls.convert_payload(model.metadata) if model.metadata is not None else None, ) @classmethod @@ -280,6 +281,7 @@ def convert_hnsw_config_diff(cls, model: grpc.HnswConfigDiff) -> rest.HnswConfig ), on_disk=model.on_disk if model.HasField("on_disk") else None, payload_m=model.payload_m if model.HasField("payload_m") else None, + inline_storage=model.inline_storage if model.HasField("inline_storage") else None, ) @classmethod @@ -295,6 +297,7 @@ def convert_hnsw_config(cls, model: grpc.HnswConfigDiff) -> rest.HnswConfig: ), on_disk=model.on_disk if model.HasField("on_disk") else None, payload_m=model.payload_m if model.HasField("payload_m") else None, + inline_storage=model.inline_storage if model.HasField("inline_storage") else None, ) @classmethod @@ -535,6 +538,16 @@ def convert_search_params(cls, model: grpc.SearchParams) -> rest.SearchParams: else None ), indexed_only=model.indexed_only if model.HasField("indexed_only") else None, + acorn=cls.convert_acorn_search_params(model.acorn) + if model.HasField("acorn") + else None, + ) + + @classmethod + def convert_acorn_search_params(cls, model: grpc.AcornSearchParams) -> rest.AcornSearchParams: + return rest.AcornSearchParams( + enable=model.enable if model.HasField("enable") else None, + max_selectivity=model.max_selectivity if model.HasField("max_selectivity") else None, ) @classmethod @@ -676,6 +689,8 @@ def convert_match(cls, model: grpc.Match) -> rest.Match: return rest.MatchExcept(**{"except": list(val.integers)}) if name == "phrase": return rest.MatchPhrase(phrase=val) + if name == "text_any": + return rest.MatchTextAny(text_any=val) raise ValueError(f"invalid Match model: {model}") # pragma: no cover @classmethod @@ -787,6 +802,7 @@ def convert_update_collection(cls, model: grpc.UpdateCollection) -> rest.UpdateC if model.HasField("quantization_config") else None ), + metadata=(cls.convert_payload(model.metadata) if model.metadata is not None else None), ) @classmethod @@ -995,6 +1011,7 @@ def _convert_vector( otherwise it's propagated for further processing along with the raw value """ name = model.WhichOneof("vector") + # region deprecated if name is None: if model.HasField("indices"): return None, rest.SparseVector(indices=model.indices.data[:], values=model.data[:]) @@ -1005,6 +1022,7 @@ def _convert_vector( return None, [vectors[i : i + step] for i in range(0, len(vectors), step)] return None, model.data[:] + # endregion val = getattr(model, name) if name == "dense": @@ -1350,6 +1368,10 @@ def convert_query(cls, model: grpc.Query) -> rest.Query: nearest=cls.convert_vector_input(val.nearest), mmr=cls.convert_mmr(val.mmr) ) + if name == "rrf": + rrf = model.rrf + return rest.RrfQuery(rrf=rest.Rrf(k=rrf.k if rrf.HasField("k") else None)) + raise ValueError(f"invalid Query model: {model}") # pragma: no cover @classmethod @@ -1390,44 +1412,6 @@ def convert_with_vectors_selector(cls, model: grpc.WithVectorsSelector) -> rest. 
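
# [editor's note] MatchTextAny, wired into the Match converters above, matches a
# document if at least one token of the given string occurs in the indexed text.
# A hedged sketch; the payload field "body" is illustrative.
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")
hits = client.query_points(
    collection_name="docs",
    query=[0.1, 0.2, 0.3, 0.4],
    query_filter=models.Filter(
        must=[
            models.FieldCondition(key="body", match=models.MatchTextAny(text_any="fast search"))
        ]
    ),
)
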
return cls.convert_vectors_selector(val) raise ValueError(f"invalid WithVectorsSelector model: {model}") # pragma: no cover - @classmethod - def convert_search_points(cls, model: grpc.SearchPoints) -> rest.SearchRequest: - vector = ( - rest.NamedVector(name=model.vector_name, vector=model.vector[:]) - if not model.HasField("sparse_indices") - else ( - rest.NamedSparseVector( - name=model.vector_name, - vector=rest.SparseVector( - indices=model.sparse_indices.data[:], values=model.vector[:] - ), - ) - ) - ) - return rest.SearchRequest( - vector=vector, - filter=cls.convert_filter(model.filter) if model.HasField("filter") else None, - limit=model.limit, - with_payload=( - cls.convert_with_payload_interface(model.with_payload) - if model.HasField("with_payload") - else None - ), - params=cls.convert_search_params(model.params) if model.HasField("params") else None, - score_threshold=model.score_threshold if model.HasField("score_threshold") else None, - offset=model.offset if model.HasField("offset") else None, - with_vector=( - cls.convert_with_vectors_selector(model.with_vectors) - if model.HasField("with_vectors") - else None - ), - shard_key=( - cls.convert_shard_key_selector(model.shard_key_selector) - if model.HasField("shard_key_selector") - else None - ), - ) - @classmethod def convert_query_points(cls, model: grpc.QueryPoints) -> rest.QueryRequest: return rest.QueryRequest( @@ -1465,109 +1449,6 @@ def convert_query_points(cls, model: grpc.QueryPoints) -> rest.QueryRequest: ), ) - @classmethod - def convert_recommend_points(cls, model: grpc.RecommendPoints) -> rest.RecommendRequest: - positive_ids = [cls.convert_point_id(point_id) for point_id in model.positive] - negative_ids = [cls.convert_point_id(point_id) for point_id in model.negative] - - positive_vectors = [cls.convert_vector(vector) for vector in model.positive_vectors] - negative_vectors = [cls.convert_vector(vector) for vector in model.negative_vectors] - - return rest.RecommendRequest( - positive=positive_ids + positive_vectors, - negative=negative_ids + negative_vectors, - filter=cls.convert_filter(model.filter) if model.HasField("filter") else None, - limit=model.limit, - with_payload=( - cls.convert_with_payload_interface(model.with_payload) - if model.HasField("with_payload") - else None - ), - params=cls.convert_search_params(model.params) if model.HasField("params") else None, - score_threshold=model.score_threshold if model.HasField("score_threshold") else None, - offset=model.offset if model.HasField("offset") else None, - with_vector=( - cls.convert_with_vectors_selector(model.with_vectors) - if model.HasField("with_vectors") - else None - ), - using=model.using, - lookup_from=( - cls.convert_lookup_location(model.lookup_from) - if model.HasField("lookup_from") - else None - ), - strategy=( - cls.convert_recommend_strategy(model.strategy) - if model.HasField("strategy") - else None - ), - shard_key=( - cls.convert_shard_key_selector(model.shard_key_selector) - if model.HasField("shard_key_selector") - else None - ), - ) - - @classmethod - def convert_discover_points(cls, model: grpc.DiscoverPoints) -> rest.DiscoverRequest: - target = cls.convert_target_vector(model.target) if model.HasField("target") else None - context = [cls.convert_context_example_pair(pair) for pair in model.context] - return rest.DiscoverRequest( - target=target, - context=context, - filter=cls.convert_filter(model.filter) if model.HasField("filter") else None, - limit=model.limit, - with_payload=( - 
cls.convert_with_payload_interface(model.with_payload) - if model.HasField("with_payload") - else None - ), - params=cls.convert_search_params(model.params) if model.HasField("params") else None, - offset=model.offset if model.HasField("offset") else None, - with_vector=( - cls.convert_with_vectors_selector(model.with_vectors) - if model.HasField("with_vectors") - else None - ), - using=model.using, - lookup_from=( - cls.convert_lookup_location(model.lookup_from) - if model.HasField("lookup_from") - else None - ), - shard_key=( - cls.convert_shard_key_selector(model.shard_key_selector) - if model.HasField("shard_key_selector") - else None - ), - ) - - @classmethod - def convert_vector_example(cls, model: grpc.VectorExample) -> rest.RecommendExample: - if model.HasField("vector"): - return cls.convert_vector(model.vector) - if model.HasField("id"): - return cls.convert_point_id(model.id) - - raise ValueError(f"invalid VectorExample model: {model}") # pragma: no cover - - @classmethod - def convert_target_vector(cls, model: grpc.TargetVector) -> rest.RecommendExample: - if model.HasField("single"): - return cls.convert_vector_example(model.single) - - raise ValueError(f"invalid TargetVector model: {model}") # pragma: no cover - - @classmethod - def convert_context_example_pair( - cls, model: grpc.ContextExamplePair - ) -> rest.ContextExamplePair: - return rest.ContextExamplePair( - positive=cls.convert_vector_example(model.positive), - negative=cls.convert_vector_example(model.negative), - ) - @classmethod def convert_tokenizer_type(cls, model: grpc.TokenizerType) -> rest.TokenizerType: if model == grpc.Unknown: @@ -1597,6 +1478,7 @@ def convert_text_index_params(cls, model: grpc.TextIndexParams) -> rest.TextInde else None, on_disk=model.on_disk if model.HasField("on_disk") else None, stemmer=cls.convert_stemmer(model.stemmer) if model.HasField("stemmer") else None, + ascii_folding=model.ascii_folding if model.HasField("ascii_folding") else None, ) @classmethod @@ -1970,10 +1852,14 @@ def convert_points_update_operation( if val.HasField("shard_key_selector") else None ) + update_filter = ( + cls.convert_filter(val.update_filter) if val.HasField("update_filter") else None + ) return rest.UpsertOperation( upsert=rest.PointsList( points=[cls.convert_point_struct(point) for point in val.points], shard_key=shard_key_selector, + update_filter=update_filter, ) ) elif name == "delete_points": @@ -2075,10 +1961,14 @@ def convert_points_update_operation( if val.HasField("shard_key_selector") else None ) + update_filter = ( + cls.convert_filter(val.update_filter) if val.HasField("update_filter") else None + ) return rest.UpdateVectorsOperation( update_vectors=rest.UpdateVectors( points=[cls.convert_point_vectors(point) for point in val.points], shard_key=shard_key_selector, + update_filter=update_filter, ) ) elif name == "delete_vectors": @@ -2109,12 +1999,6 @@ def convert_points_update_operation( else: raise ValueError(f"invalid UpdateOperation model: {model}") # pragma: no cover - @classmethod - def convert_init_from(cls, model: str) -> rest.InitFrom: - if isinstance(model, str): - return rest.InitFrom(collection=model) - raise ValueError(f"Invalid InitFrom model: {model}") # pragma: no cover - @classmethod def convert_recommend_strategy(cls, model: grpc.RecommendStrategy) -> rest.RecommendStrategy: if model == grpc.RecommendStrategy.AverageVector: @@ -2172,10 +2056,60 @@ def convert_shard_key(cls, model: grpc.ShardKey) -> rest.ShardKey: val = getattr(model, name) return val + @classmethod + def 
convert_replica_state(cls, model: grpc.ReplicaState) -> rest.ReplicaState: + if model == grpc.ReplicaState.Active: + return rest.ReplicaState.ACTIVE + + if model == grpc.ReplicaState.Dead: + return rest.ReplicaState.DEAD + + if model == grpc.ReplicaState.Partial: + return rest.ReplicaState.PARTIAL + + if model == grpc.ReplicaState.Initializing: + return rest.ReplicaState.INITIALIZING + + if model == grpc.ReplicaState.Listener: + return rest.ReplicaState.LISTENER + + if model == grpc.ReplicaState.PartialSnapshot: + return rest.ReplicaState.PARTIALSNAPSHOT + + if model == grpc.ReplicaState.Recovery: + return rest.ReplicaState.RECOVERY + + if model == grpc.ReplicaState.Resharding: + return rest.ReplicaState.RESHARDING + + if model == grpc.ReplicaState.ReshardingScaleDown: + return rest.ReplicaState.RESHARDINGSCALEDOWN + + if model == grpc.ReplicaState.ActiveRead: + return rest.ReplicaState.ACTIVEREAD + + raise ValueError(f"invalid ReplicaState model: {model}") # pragma: no cover + @classmethod def convert_shard_key_selector(cls, model: grpc.ShardKeySelector) -> rest.ShardKeySelector: + fallback = None + if model.HasField("fallback"): + fallback = model.fallback + if len(model.shard_keys) == 1: - return cls.convert_shard_key(model.shard_keys[0]) + return ( + cls.convert_shard_key(model.shard_keys[0]) + if fallback is None + else rest.ShardKeyWithFallback( + target=cls.convert_shard_key(model.shard_keys[0]), + fallback=cls.convert_shard_key(model.fallback), + ) + ) + elif fallback: + raise ValueError( + f"Fallback shard key {fallback} can only be set when a single shard key is provided" + ) + return [cls.convert_shard_key(shard_key) for shard_key in model.shard_keys] @classmethod @@ -2363,6 +2297,9 @@ def convert_strict_mode_config(cls, model: grpc.StrictModeConfig) -> rest.Strict if model.HasField("sparse_config") else None ), + max_payload_index_count=model.max_payload_index_count + if model.HasField("max_payload_index_count") + else None, ) @classmethod @@ -2430,6 +2367,9 @@ def convert_strict_mode_config_output( if model.HasField("sparse_config") else None ), + max_payload_index_count=model.max_payload_index_count + if model.HasField("max_payload_index_count") + else None, ) @classmethod @@ -2559,7 +2499,6 @@ def convert_collection_info(cls, model: rest.CollectionInfo) -> grpc.CollectionI ), segments_count=model.segments_count, status=cls.convert_collection_status(model.status), - vectors_count=model.vectors_count if model.vectors_count is not None else None, points_count=model.points_count, ) @@ -2724,6 +2663,16 @@ def convert_search_params(cls, model: rest.SearchParams) -> grpc.SearchParams: else None ), indexed_only=model.indexed_only, + acorn=( + cls.convert_acorn_search_params(model.acorn) if model.acorn is not None else None + ), + ) + + @classmethod + def convert_acorn_search_params(cls, model: rest.AcornSearchParams) -> grpc.AcornSearchParams: + return grpc.AcornSearchParams( + enable=model.enable if model.enable is not None else None, + max_selectivity=model.max_selectivity if model.max_selectivity is not None else None, ) @classmethod @@ -2791,6 +2740,7 @@ def convert_hnsw_config_diff(cls, model: rest.HnswConfigDiff) -> grpc.HnswConfig max_indexing_threads=model.max_indexing_threads, on_disk=model.on_disk, payload_m=model.payload_m, + inline_storage=model.inline_storage, ) @classmethod @@ -2853,6 +2803,7 @@ def convert_collection_config(cls, model: rest.CollectionConfig) -> grpc.Collect if model.strict_mode_config is not None else None ), + 
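
# [editor's note] The shard key selector conversion above now understands a
# target/fallback pair. A hedged sketch of passing ShardKeyWithFallback where a
# plain shard key used to go; the key values are illustrative, and the exact
# fallback semantics are defined server-side, not by this client patch.
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")
hits = client.query_points(
    collection_name="docs",
    query=[0.1, 0.2, 0.3, 0.4],
    shard_key_selector=models.ShardKeyWithFallback(target="tenant_a", fallback="default"),
)
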
metadata=cls.convert_payload(model.metadata) if model.metadata is not None else None, ) @classmethod @@ -2864,6 +2815,7 @@ def convert_hnsw_config(cls, model: rest.HnswConfig) -> grpc.HnswConfigDiff: max_indexing_threads=model.max_indexing_threads, on_disk=model.on_disk, payload_m=model.payload_m, + inline_storage=model.inline_storage, ) @classmethod @@ -2992,6 +2944,7 @@ def convert_update_collection( if model.quantization_config is not None else None ), + metadata=(cls.convert_payload(model.metadata) if model.metadata is not None else None), ) @classmethod @@ -3027,6 +2980,8 @@ def convert_match(cls, model: rest.Match) -> grpc.Match: raise ValueError(f"invalid MatchExcept model: {model}") # pragma: no cover if isinstance(model, rest.MatchPhrase): return grpc.Match(phrase=model.phrase) + if isinstance(model, rest.MatchTextAny): + return grpc.Match(text_any=model.text_any) raise ValueError(f"invalid Match model: {model}") # pragma: no cover @classmethod @@ -3047,89 +3002,30 @@ def convert_alias_description(cls, model: rest.AliasDescription) -> grpc.AliasDe collection_name=model.collection_name, ) - @classmethod - def convert_recommend_examples_to_ids( - cls, examples: Sequence[rest.RecommendExample] - ) -> list[grpc.PointId]: - ids: list[grpc.PointId] = [] - for example in examples: - if isinstance(example, get_args_subscribed(rest.ExtendedPointId)): - id_ = cls.convert_extended_point_id(example) - elif isinstance(example, grpc.PointId): - id_ = example - else: - continue - - ids.append(id_) - - return ids - - @classmethod - def convert_recommend_examples_to_vectors( - cls, examples: Sequence[rest.RecommendExample] - ) -> list[grpc.Vector]: - vectors: list[grpc.Vector] = [] - for example in examples: - if isinstance(example, grpc.Vector): - vector = example - elif isinstance(example, list): - vector = grpc.Vector(data=example) - elif isinstance(example, rest.SparseVector): - vector = cls.convert_sparse_vector_to_vector(example) - else: - continue - - vectors.append(vector) - - return vectors - - @classmethod - def convert_vector_example(cls, model: rest.RecommendExample) -> grpc.VectorExample: - return cls.convert_recommend_example(model) - - @classmethod - def convert_recommend_example(cls, model: rest.RecommendExample) -> grpc.VectorExample: - if isinstance(model, get_args_subscribed(rest.ExtendedPointId)): - return grpc.VectorExample(id=cls.convert_extended_point_id(model)) - if isinstance(model, rest.SparseVector): - return grpc.VectorExample(vector=cls.convert_sparse_vector_to_vector(model)) - if isinstance(model, list): - return grpc.VectorExample(vector=grpc.Vector(data=model)) - - raise ValueError(f"Invalid RecommendExample model: {model}") # pragma: no cover - @classmethod def convert_sparse_vector_to_vector(cls, model: rest.SparseVector) -> grpc.Vector: return grpc.Vector( - data=model.values, - indices=grpc.SparseIndices(data=model.indices), + sparse=grpc.SparseVector( + values=model.values, + indices=model.indices, + ) ) @classmethod def convert_sparse_vector_to_vector_output(cls, model: rest.SparseVector) -> grpc.VectorOutput: return grpc.VectorOutput( - data=model.values, - indices=grpc.SparseIndices(data=model.indices), - ) - - @classmethod - def convert_target_vector(cls, model: rest.RecommendExample) -> grpc.TargetVector: - return grpc.TargetVector(single=cls.convert_recommend_example(model)) - - @classmethod - def convert_context_example_pair( - cls, - model: rest.ContextExamplePair, - ) -> grpc.ContextExamplePair: - return grpc.ContextExamplePair( - 
positive=cls.convert_recommend_example(model.positive), - negative=cls.convert_recommend_example(model.negative), + sparse=grpc.SparseVector( + values=model.values, + indices=model.indices, + ) ) @classmethod def convert_extended_point_id(cls, model: rest.ExtendedPointId) -> grpc.PointId: if isinstance(model, int): return grpc.PointId(num=model) + if isinstance(model, uuid.UUID): + model = str(model) if isinstance(model, str): return grpc.PointId(uuid=model) raise ValueError(f"invalid ExtendedPointId model: {model}") # pragma: no cover @@ -3362,14 +3258,14 @@ def convert_vector( vector[0], list ): # we can't say whether it is an empty dense or multi-dense vector return grpc.Vector( - data=[ - inner_vector - for multi_vector in vector - for inner_vector in multi_vector # type: ignore - ], - vectors_count=len(vector), + multi_dense=grpc.MultiDenseVector( + vectors=[ + grpc.DenseVector(data=inner_vector) # type: ignore[union-attr] + for inner_vector in vector + ] + ) ) - return grpc.Vector(data=vector) + return grpc.Vector(dense=grpc.DenseVector(data=vector)) if isinstance(model, list): return grpc.Vectors(vector=convert_vector(model)) @@ -3405,14 +3301,14 @@ def convert_vector( vector[0], list ): # we can't say whether it is an empty dense or multi-dense vector return grpc.VectorOutput( - data=[ - inner_vector - for multi_vector in vector - for inner_vector in multi_vector # type: ignore - ], - vectors_count=len(vector), + multi_dense=grpc.MultiDenseVector( + vectors=[ + grpc.DenseVector(data=inner_vector) # type: ignore[union-attr] + for inner_vector in vector + ] + ) ) - return grpc.VectorOutput(data=vector) + return grpc.VectorOutput(dense=grpc.DenseVector(data=vector)) if isinstance(model, list): return grpc.VectorsOutput(vector=convert_vector(model)) @@ -3625,6 +3521,12 @@ def convert_query(cls, model: rest.Query) -> grpc.Query: if isinstance(model, rest.FormulaQuery): return grpc.Query(formula=cls.convert_formula_query(model)) + if isinstance(model, rest.RrfQuery): + rrf = grpc.Rrf() + if model.rrf.k is not None: + rrf.k = model.rrf.k + return grpc.Query(rrf=rrf) + raise ValueError(f"invalid Query model: {model}") # pragma: no cover @classmethod @@ -3752,43 +3654,6 @@ def convert_prefetch_query(cls, model: rest.Prefetch) -> grpc.PrefetchQuery: ), ) - @classmethod - def convert_search_request( - cls, model: rest.SearchRequest, collection_name: str - ) -> grpc.SearchPoints: - vector, sparse_indices, name = cls.convert_named_vector_struct(model.vector) - - return grpc.SearchPoints( - collection_name=collection_name, - vector=vector, - sparse_indices=sparse_indices, - filter=cls.convert_filter(model.filter) if model.filter is not None else None, - limit=model.limit, - with_payload=( - cls.convert_with_payload_interface(model.with_payload) - if model.with_payload is not None - else None - ), - params=cls.convert_search_params(model.params) if model.params is not None else None, - score_threshold=model.score_threshold, - offset=model.offset, - vector_name=name, - with_vectors=( - cls.convert_with_vectors(model.with_vector) - if model.with_vector is not None - else None - ), - shard_key_selector=( - cls.convert_shard_key_selector(model.shard_key) if model.shard_key else None - ), - ) - - @classmethod - def convert_search_points( - cls, model: rest.SearchRequest, collection_name: str - ) -> grpc.SearchPoints: - return cls.convert_search_request(model, collection_name) - @classmethod def convert_query_request( cls, model: rest.QueryRequest, collection_name: str @@ -3838,114 +3703,6 @@ def 
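
# [editor's note] uuid.UUID is now accepted as a point id (see the PointId union
# and convert_extended_point_id changes above). A hedged upsert sketch combining
# a UUID id with a named sparse vector; assumes the collection was created with
# a sparse vector named "text".
import uuid

from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")
client.upsert(
    collection_name="docs",
    points=[
        models.PointStruct(
            id=uuid.uuid4(),  # previously only int/str were accepted here
            vector={"text": models.SparseVector(indices=[3, 17], values=[0.6, 0.2])},
        )
    ],
)
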
convert_query_points( ) -> grpc.QueryPoints: return cls.convert_query_request(model, collection_name) - @classmethod - def convert_recommend_request( - cls, model: rest.RecommendRequest, collection_name: str - ) -> grpc.RecommendPoints: - positive_ids = cls.convert_recommend_examples_to_ids(model.positive) - negative_ids = cls.convert_recommend_examples_to_ids(model.negative) - - positive_vectors = cls.convert_recommend_examples_to_vectors(model.positive) - negative_vectors = cls.convert_recommend_examples_to_vectors(model.negative) - - return grpc.RecommendPoints( - collection_name=collection_name, - positive=positive_ids, - negative=negative_ids, - filter=cls.convert_filter(model.filter) if model.filter is not None else None, - limit=model.limit, - with_payload=( - cls.convert_with_payload_interface(model.with_payload) - if model.with_payload is not None - else None - ), - params=cls.convert_search_params(model.params) if model.params is not None else None, - score_threshold=model.score_threshold, - offset=model.offset, - with_vectors=( - cls.convert_with_vectors(model.with_vector) - if model.with_vector is not None - else None - ), - using=model.using, - lookup_from=( - cls.convert_lookup_location(model.lookup_from) - if model.lookup_from is not None - else None - ), - strategy=( - cls.convert_recommend_strategy(model.strategy) - if model.strategy is not None - else None - ), - positive_vectors=positive_vectors, - negative_vectors=negative_vectors, - shard_key_selector=( - cls.convert_shard_key_selector(model.shard_key) if model.shard_key else None - ), - ) - - @classmethod - def convert_discover_points( - cls, model: rest.DiscoverRequest, collection_name: str - ) -> grpc.DiscoverPoints: - return cls.convert_discover_request(model, collection_name) - - @classmethod - def convert_discover_request( - cls, model: rest.DiscoverRequest, collection_name: str - ) -> grpc.DiscoverPoints: - target = cls.convert_target_vector(model.target) if model.target is not None else None - - context = ( - [cls.convert_context_example_pair(pair) for pair in model.context] - if model.context is not None - else None - ) - - query_filter = None if model.filter is None else cls.convert_filter(model=model.filter) - - search_params = None if model.params is None else cls.convert_search_params(model.params) - - with_payload = ( - None - if model.with_payload is None - else cls.convert_with_payload_interface(model.with_payload) - ) - - with_vectors = ( - None if model.with_vector is None else cls.convert_with_vectors(model.with_vector) - ) - - lookup_from = ( - None if model.lookup_from is None else cls.convert_lookup_location(model.lookup_from) - ) - - shard_key_selector = ( - None if model.shard_key is None else cls.convert_shard_key_selector(model.shard_key) - ) - - return grpc.DiscoverPoints( - collection_name=collection_name, - target=target, - context=context, - filter=query_filter, - limit=model.limit, - offset=model.offset, - with_vectors=with_vectors, - with_payload=with_payload, - params=search_params, - using=model.using, - lookup_from=lookup_from, - shard_key_selector=shard_key_selector, - ) - - @classmethod - def convert_recommend_points( - cls, model: rest.RecommendRequest, collection_name: str - ) -> grpc.RecommendPoints: - return cls.convert_recommend_request(model, collection_name) - @classmethod def convert_tokenizer_type(cls, model: rest.TokenizerType) -> grpc.TokenizerType: if model == rest.TokenizerType.WORD: @@ -3976,6 +3733,7 @@ def convert_text_index_params(cls, model: rest.TextIndexParams) 
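
# [editor's note] The UpsertOperation conversion above now carries
# `update_filter`, so batched updates can be made conditional. A hedged sketch
# via batch_update_points; the payload key "version" is illustrative.
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")
client.batch_update_points(
    collection_name="docs",
    update_operations=[
        models.UpsertOperation(
            upsert=models.PointsList(
                points=[models.PointStruct(id=1, vector=[0.1, 0.2, 0.3, 0.4])],
                # only overwrite points whose current payload matches this filter
                update_filter=models.Filter(
                    must=[
                        models.FieldCondition(key="version", match=models.MatchValue(value=1))
                    ]
                ),
            )
        )
    ],
)
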
-> grpc.TextInde else None, phrase_matching=model.phrase_matching, stemmer=cls.convert_stemmer(model.stemmer) if model.stemmer is not None else None, + ascii_folding=model.ascii_folding if model.ascii_folding is not None else None, ) @classmethod @@ -4325,6 +4083,7 @@ def convert_vectors_config_diff(cls, model: rest.VectorsConfigDiff) -> grpc.Vect def convert_point_insert_operation( cls, model: rest.PointInsertOperations ) -> list[grpc.PointStruct]: + # shard key and update_filter are converted in the parent function if isinstance(model, rest.PointsBatch): vectors_batch: list[grpc.Vectors] = cls.convert_batch_vector_struct( model.batch.vectors, len(model.batch.ids) @@ -4360,10 +4119,16 @@ def convert_points_update_operation( if model.upsert.shard_key else None ) + update_filter = ( + cls.convert_filter(model.upsert.update_filter) + if model.upsert.update_filter + else None + ) return grpc.PointsUpdateOperation( upsert=grpc.PointsUpdateOperation.PointStructList( points=cls.convert_point_insert_operation(model.upsert), shard_key_selector=shard_key_selector, + update_filter=update_filter, ) ) elif isinstance(model, rest.DeleteOperation): @@ -4469,13 +4234,18 @@ def convert_points_update_operation( if model.update_vectors.shard_key else None ) - + update_filter = ( + cls.convert_filter(model.update_vectors.update_filter) + if model.update_vectors.update_filter + else None + ) return grpc.PointsUpdateOperation( update_vectors=grpc.PointsUpdateOperation.UpdateVectors( points=[ cls.convert_point_vectors(point) for point in model.update_vectors.points ], shard_key_selector=shard_key_selector, + update_filter=update_filter, ) ) elif isinstance(model, rest.DeleteVectorsOperation): @@ -4504,13 +4274,6 @@ def convert_points_update_operation( else: raise ValueError(f"invalid UpdateOperation model: {model}") # pragma: no cover - @classmethod - def convert_init_from(cls, model: rest.InitFrom) -> str: - if isinstance(model, rest.InitFrom): - return model.collection - else: - raise ValueError(f"invalid InitFrom model: {model}") # pragma: no cover - @classmethod def convert_recommend_strategy(cls, model: rest.RecommendStrategy) -> grpc.RecommendStrategy: if model == rest.RecommendStrategy.AVERAGE_VECTOR: @@ -4571,8 +4334,51 @@ def convert_shard_key(cls, model: rest.ShardKey) -> grpc.ShardKey: raise ValueError(f"invalid ShardKey model: {model}") # pragma: no cover + @classmethod + def convert_replica_state(cls, model: rest.ReplicaState) -> grpc.ReplicaState: + if model == rest.ReplicaState.ACTIVE: + return grpc.ReplicaState.Active + + if model == rest.ReplicaState.DEAD: + return grpc.ReplicaState.Dead + + if model == rest.ReplicaState.PARTIAL: + return grpc.ReplicaState.Partial + + if model == rest.ReplicaState.INITIALIZING: + return grpc.ReplicaState.Initializing + + if model == rest.ReplicaState.LISTENER: + return grpc.ReplicaState.Listener + + if model == rest.ReplicaState.PARTIALSNAPSHOT: + return grpc.ReplicaState.PartialSnapshot + + if model == rest.ReplicaState.RECOVERY: + return grpc.ReplicaState.Recovery + + if model == rest.ReplicaState.RESHARDING: + return grpc.ReplicaState.Resharding + + if model == rest.ReplicaState.RESHARDINGSCALEDOWN: + return grpc.ReplicaState.ReshardingScaleDown + + if model == rest.ReplicaState.ACTIVEREAD: + return grpc.ReplicaState.ActiveRead + + raise ValueError(f"invalid ReplicaState model: {model}") # pragma: no cover + @classmethod def convert_shard_key_selector(cls, model: rest.ShardKeySelector) -> grpc.ShardKeySelector: + if isinstance( + model, 
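
# [editor's note] `ascii_folding` on text indexes, converted above, folds
# accented characters to ASCII (e.g. 'ação' -> 'acao') before indexing. A hedged
# sketch; the field name "title" is illustrative.
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")
client.create_payload_index(
    collection_name="docs",
    field_name="title",
    field_schema=models.TextIndexParams(
        type=models.TextIndexType.TEXT,
        tokenizer=models.TokenizerType.WORD,
        lowercase=True,
        ascii_folding=True,
    ),
)
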
rest.ShardKeyWithFallback + ): # have to be the first, since it's a part of the union + # of rest.ShardKeySelector type + return grpc.ShardKeySelector( + shard_keys=[cls.convert_shard_key(model.target)], + fallback=cls.convert_shard_key(model.fallback), + ) + if isinstance(model, get_args_subscribed(rest.ShardKey)): return grpc.ShardKeySelector(shard_keys=[cls.convert_shard_key(model)]) @@ -4688,6 +4494,7 @@ def convert_strict_mode_config(cls, model: rest.StrictModeConfig) -> grpc.Strict if model.sparse_config else None ), + max_payload_index_count=model.max_payload_index_count, ) @classmethod @@ -4721,6 +4528,7 @@ def convert_strict_mode_config_output( if model.sparse_config else None ), + max_payload_index_count=model.max_payload_index_count, ) @classmethod diff --git a/qdrant_client/embed/_inspection_cache.py b/qdrant_client/embed/_inspection_cache.py index b61a7f29..1c88f5fb 100644 --- a/qdrant_client/embed/_inspection_cache.py +++ b/qdrant_client/embed/_inspection_cache.py @@ -3,9 +3,11 @@ "AbortShardTransfer": [], "AbortTransferOperation": [], "AbsExpression": [], + "AcornSearchParams": [], "Batch": ["vectors"], "BinaryQuantization": [], "BinaryQuantizationConfig": [], + "Bm25Config": [], "BoolIndexParams": [], "ChangeAliasesOperation": [], "ClearPayloadOperation": [], @@ -65,20 +67,19 @@ "HnswConfigDiff": [], "Image": [], "InferenceObject": [], - "InitFrom": [], "IntegerIndexParams": [], "IsEmptyCondition": [], "IsNullCondition": [], "KeywordIndexParams": [], "LinDecayExpression": [], "LnExpression": [], - "LocksOption": [], "Log10Expression": [], "LookupLocation": [], "MatchAny": [], "MatchExcept": [], "MatchPhrase": [], "MatchText": [], + "MatchTextAny": [], "MatchValue": [], "MinShould": [], "Mmr": [], @@ -200,10 +201,14 @@ "RenameAlias": [], "RenameAliasOperation": [], "Replica": [], + "ReplicatePoints": [], + "ReplicatePointsOperation": [], "ReplicateShard": [], "ReplicateShardOperation": [], "RestartTransfer": [], "RestartTransferOperation": [], + "Rrf": [], + "RrfQuery": [], "SampleQuery": [], "ScalarQuantization": [], "ScalarQuantizationConfig": [], @@ -215,6 +220,7 @@ "SearchRequestBatch": [], "SetPayload": [], "SetPayloadOperation": [], + "ShardKeyWithFallback": [], "ShardSnapshotRecover": [], "SnapshotRecover": [], "SnowballParams": [], @@ -437,7 +443,7 @@ "midpoint": { "anyOf": [{"type": "number"}, {"type": "null"}], "default": None, - "description": "The midpoint of the decay. Defaults to 0.5. Output will be this value when `|x - target| == scale`.", + "description": "The midpoint of the decay. Should be between 0 and 1.Defaults to 0.5. 
Output will be this value when `|x - target| == scale`.", "title": "Midpoint", }, }, @@ -583,6 +589,7 @@ "anyOf": [ {"$ref": "#/$defs/MatchValue"}, {"$ref": "#/$defs/MatchText"}, + {"$ref": "#/$defs/MatchTextAny"}, {"$ref": "#/$defs/MatchPhrase"}, {"$ref": "#/$defs/MatchAny"}, {"$ref": "#/$defs/MatchExcept"}, @@ -857,7 +864,13 @@ "properties": { "has_id": { "description": "ID-based filtering condition", - "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, + "items": { + "anyOf": [ + {"type": "integer"}, + {"type": "string"}, + {"format": "uuid", "type": "string"}, + ] + }, "title": "Has Id", "type": "array", } @@ -1053,6 +1066,20 @@ "title": "MatchText", "type": "object", }, + "MatchTextAny": { + "additionalProperties": False, + "description": "Full-text match of at least one token of the string.", + "properties": { + "text_any": { + "description": "Full-text match of at least one token of the string.", + "title": "Text Any", + "type": "string", + } + }, + "required": ["text_any"], + "title": "MatchTextAny", + "type": "object", + }, "MatchValue": { "additionalProperties": False, "description": "Exact match of the given value", @@ -1430,24 +1457,104 @@ "title": "ValuesCount", "type": "object", }, + "Bm25Config": { + "additionalProperties": False, + "description": "Configuration of the local bm25 models.", + "properties": { + "k": { + "anyOf": [{"type": "number"}, {"type": "null"}], + "default": 1.2, + "description": "Controls term frequency saturation. Higher values mean term frequency has more impact. Default is 1.2", + "title": "K", + }, + "b": { + "anyOf": [{"type": "number"}, {"type": "null"}], + "default": 0.75, + "description": "Controls document length normalization. Ranges from 0 (no normalization) to 1 (full normalization). Higher values mean longer documents have less impact. Default is 0.75.", + "title": "B", + }, + "avg_len": { + "anyOf": [{"type": "number"}, {"type": "null"}], + "default": 256, + "description": "Expected average document length in the collection. Default is 256.", + "title": "Avg Len", + }, + "tokenizer": { + "anyOf": [{"$ref": "#/$defs/TokenizerType"}, {"type": "null"}], + "default": None, + "description": "Configuration of the local bm25 models.", + }, + "language": { + "anyOf": [{"type": "string"}, {"type": "null"}], + "default": None, + "description": "Defines which language to use for text preprocessing. This parameter is used to construct default stopwords filter and stemmer. To disable language-specific processing, set this to `'language': 'none'`. If not specified, English is assumed.", + "title": "Language", + }, + "lowercase": { + "anyOf": [{"type": "boolean"}, {"type": "null"}], + "default": None, + "description": "Lowercase the text before tokenization. Default is `true`.", + "title": "Lowercase", + }, + "ascii_folding": { + "anyOf": [{"type": "boolean"}, {"type": "null"}], + "default": None, + "description": "If true, normalize tokens by folding accented characters to ASCII (e.g., 'ação' -> 'acao'). Default is `false`.", + "title": "Ascii Folding", + }, + "stopwords": { + "anyOf": [ + {"$ref": "#/$defs/Language"}, + {"$ref": "#/$defs/StopwordsSet"}, + {"type": "null"}, + ], + "default": None, + "description": "Configuration of the stopwords filter. Supports list of pre-defined languages and custom stopwords. 
Default: initialized for specified `language` or English if not specified.", + "title": "Stopwords", + }, + "stemmer": { + "anyOf": [{"$ref": "#/$defs/SnowballParams"}, {"type": "null"}], + "default": None, + "description": "Configuration of the stemmer. Processes tokens to their root form. Default: initialized Snowball stemmer for specified `language` or English if not specified.", + }, + "min_token_len": { + "anyOf": [{"type": "integer"}, {"type": "null"}], + "default": None, + "description": "Minimum token length to keep. If token is shorter than this, it will be discarded. Default is `None`, which means no minimum length.", + "title": "Min Token Len", + }, + "max_token_len": { + "anyOf": [{"type": "integer"}, {"type": "null"}], + "default": None, + "description": "Maximum token length to keep. If token is longer than this, it will be discarded. Default is `None`, which means no maximum length.", + "title": "Max Token Len", + }, + }, + "title": "Bm25Config", + "type": "object", + }, "Document": { "additionalProperties": False, "description": "WARN: Work-in-progress, unimplemented Text document for embedding. Requires inference infrastructure, unimplemented.", "properties": { "text": { - "description": "Text of the document This field will be used as input for the embedding model", + "description": "Text of the document. This field will be used as input for the embedding model.", "title": "Text", "type": "string", }, "model": { - "description": "Name of the model used to generate the vector List of available models depends on a provider", + "description": "Name of the model used to generate the vector. List of available models depends on a provider.", "title": "Model", "type": "string", }, "options": { - "anyOf": [{"additionalProperties": True, "type": "object"}, {"type": "null"}], + "anyOf": [ + {"additionalProperties": True, "type": "object"}, + {"$ref": "#/$defs/Bm25Config"}, + {"type": "null"}, + ], "default": None, - "description": "Parameters for the model Values of the parameters are model-specific", + "description": "Additional options for the model, will be passed to the inference service as-is. See model cards for available options.", "title": "Options", }, }, @@ -1464,7 +1571,7 @@ "title": "Image", }, "model": { - "description": "Name of the model used to generate the vector List of available models depends on a provider", + "description": "Name of the model used to generate the vector. List of available models depends on a provider.", "title": "Model", "type": "string", }, @@ -1484,11 +1591,11 @@ "description": "WARN: Work-in-progress, unimplemented Custom object for embedding. Requires inference infrastructure, unimplemented.", "properties": { "object": { - "description": "Arbitrary data, used as input for the embedding model Used if the model requires more than one input or a custom input", + "description": "Arbitrary data, used as input for the embedding model. Used if the model requires more than one input or a custom input.", "title": "Object", }, "model": { - "description": "Name of the model used to generate the vector List of available models depends on a provider", + "description": "Name of the model used to generate the vector. 
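
# [editor's note] Per the updated Document schema above, `options` now accepts a
# Bm25Config alongside a plain dict. A hedged sketch; the model name "Qdrant/bm25"
# and local BM25 inference are assumptions (they depend on the installed
# fastembed extra), not something this patch guarantees.
from qdrant_client import models

doc = models.Document(
    text="Fast and scalable vector search",
    model="Qdrant/bm25",
    options=models.Bm25Config(
        k=1.2,              # term-frequency saturation (schema default 1.2)
        b=0.75,             # document-length normalization (schema default 0.75)
        language="english",
        ascii_folding=True,
    ),
)
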
List of available models depends on a provider.", "title": "Model", "type": "string", }, @@ -1503,6 +1610,79 @@ "title": "InferenceObject", "type": "object", }, + "Language": { + "enum": [ + "arabic", + "azerbaijani", + "basque", + "bengali", + "catalan", + "chinese", + "danish", + "dutch", + "english", + "finnish", + "french", + "german", + "greek", + "hebrew", + "hinglish", + "hungarian", + "indonesian", + "italian", + "japanese", + "kazakh", + "nepali", + "norwegian", + "portuguese", + "romanian", + "russian", + "slovene", + "spanish", + "swedish", + "tajik", + "turkish", + ], + "title": "Language", + "type": "string", + }, + "Snowball": {"enum": ["snowball"], "title": "Snowball", "type": "string"}, + "SnowballLanguage": { + "description": "Languages supported by snowball stemmer.", + "enum": [ + "arabic", + "armenian", + "danish", + "dutch", + "english", + "finnish", + "french", + "german", + "greek", + "hungarian", + "italian", + "norwegian", + "portuguese", + "romanian", + "russian", + "spanish", + "swedish", + "tamil", + "turkish", + ], + "title": "SnowballLanguage", + "type": "string", + }, + "SnowballParams": { + "additionalProperties": False, + "properties": { + "type": {"$ref": "#/$defs/Snowball", "description": ""}, + "language": {"$ref": "#/$defs/SnowballLanguage", "description": ""}, + }, + "required": ["type", "language"], + "title": "SnowballParams", + "type": "object", + }, "SparseVector": { "additionalProperties": False, "description": "Sparse vector structure", @@ -1524,6 +1704,33 @@ "title": "SparseVector", "type": "object", }, + "StopwordsSet": { + "additionalProperties": False, + "properties": { + "languages": { + "anyOf": [ + {"items": {"$ref": "#/$defs/Language"}, "type": "array"}, + {"type": "null"}, + ], + "default": None, + "description": "Set of languages to use for stopwords. Multiple pre-defined lists of stopwords can be combined.", + "title": "Languages", + }, + "custom": { + "anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], + "default": None, + "description": "Custom stopwords set. 
Will be merged with the languages set.", + "title": "Custom", + }, + }, + "title": "StopwordsSet", + "type": "object", + }, + "TokenizerType": { + "enum": ["prefix", "whitespace", "word", "multilingual"], + "title": "TokenizerType", + "type": "string", + }, "BinaryQuantizationConfig": { "additionalProperties": False, "properties": { @@ -1645,6 +1852,7 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, @@ -1661,7 +1869,13 @@ "properties": { "points": { "description": "", - "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, + "items": { + "anyOf": [ + {"type": "integer"}, + {"type": "string"}, + {"format": "uuid", "type": "string"}, + ] + }, "title": "Points", "type": "array", }, @@ -1673,6 +1887,7 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, @@ -1684,6 +1899,24 @@ "title": "PointIdsList", "type": "object", }, + "ShardKeyWithFallback": { + "additionalProperties": False, + "properties": { + "target": { + "anyOf": [{"type": "integer"}, {"type": "string"}], + "description": "", + "title": "Target", + }, + "fallback": { + "anyOf": [{"type": "integer"}, {"type": "string"}], + "description": "", + "title": "Fallback", + }, + }, + "required": ["target", "fallback"], + "title": "ShardKeyWithFallback", + "type": "object", + }, "ContextPair": { "additionalProperties": False, "properties": { @@ -1694,6 +1927,7 @@ {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"$ref": "#/$defs/Document"}, {"$ref": "#/$defs/Image"}, {"$ref": "#/$defs/InferenceObject"}, @@ -1708,6 +1942,7 @@ {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"$ref": "#/$defs/Document"}, {"$ref": "#/$defs/Image"}, {"$ref": "#/$defs/InferenceObject"}, @@ -1757,7 +1992,7 @@ "full_scan_threshold": { "anyOf": [{"type": "integer"}, {"type": "null"}], "default": None, - "description": "Minimal size (in kilobytes) of vectors for additional payload-based indexing. If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used - in this case full-scan search should be preferred by query planner and additional indexing is not required. Note: 1Kb = 1 vector of size 256", + "description": "Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search. This measures the total size of vectors being queried against. When the maximum estimated amount of points that a condition satisfies is smaller than `full_scan_threshold_kb`, the query planner will use full-scan search instead of HNSW index traversal for better performance. Note: 1Kb = 1 vector of size 256", "title": "Full Scan Threshold", }, "max_indexing_threads": { @@ -1778,24 +2013,16 @@ "description": "Custom M param for additional payload-aware HNSW links. If not set, default M will be used.", "title": "Payload M", }, + "inline_storage": { + "anyOf": [{"type": "boolean"}, {"type": "null"}], + "default": None, + "description": "Store copies of original and quantized vectors within the HNSW index file. Default: false. Enabling this option will trade the search speed for disk usage by reducing amount of random seeks during the search. Requires quantized vectors to be enabled. 
Multi-vectors are not supported.", + "title": "Inline Storage", + }, }, "title": "HnswConfigDiff", "type": "object", }, - "InitFrom": { - "additionalProperties": False, - "description": "Operation for creating new collection and (optionally) specify index params", - "properties": { - "collection": { - "description": "Operation for creating new collection and (optionally) specify index params", - "title": "Collection", - "type": "string", - } - }, - "required": ["collection"], - "title": "InitFrom", - "type": "object", - }, "MaxOptimizationThreadsSetting": { "enum": ["auto"], "title": "MaxOptimizationThreadsSetting", @@ -2011,13 +2238,13 @@ "search_max_hnsw_ef": { "anyOf": [{"type": "integer"}, {"type": "null"}], "default": None, - "description": "Max HNSW value allowed in search parameters.", + "description": "Max HNSW ef value allowed in search parameters.", "title": "Search Max Hnsw Ef", }, "search_allow_exact": { "anyOf": [{"type": "boolean"}, {"type": "null"}], "default": None, - "description": "Whether exact search is allowed or not.", + "description": "Whether exact search is allowed.", "title": "Search Allow Exact", }, "search_max_oversampling": { @@ -2083,7 +2310,7 @@ {"type": "null"}, ], "default": None, - "description": "Multivector configuration", + "description": "Multivector strict mode configuration", "title": "Multivector Config", }, "sparse_config": { @@ -2095,9 +2322,15 @@ {"type": "null"}, ], "default": None, - "description": "Sparse vector configuration", + "description": "Sparse vector strict mode configuration", "title": "Sparse Config", }, + "max_payload_index_count": { + "anyOf": [{"type": "integer"}, {"type": "null"}], + "default": None, + "description": "Max number of payload indexes in a collection", + "title": "Max Payload Index Count", + }, }, "title": "StrictModeConfig", "type": "object", @@ -2189,6 +2422,12 @@ "description": "Number of WAL segments to create ahead of actually used ones", "title": "Wal Segments Ahead", }, + "wal_retain_closed": { + "anyOf": [{"type": "integer"}, {"type": "null"}], + "default": None, + "description": "Number of closed WAL segments to retain", + "title": "Wal Retain Closed", + }, }, "title": "WalConfigDiff", "type": "object", @@ -2324,107 +2563,12 @@ "type": "object", }, "KeywordIndexType": {"enum": ["keyword"], "title": "KeywordIndexType", "type": "string"}, - "Language": { - "enum": [ - "arabic", - "azerbaijani", - "basque", - "bengali", - "catalan", - "chinese", - "danish", - "dutch", - "english", - "finnish", - "french", - "german", - "greek", - "hebrew", - "hinglish", - "hungarian", - "indonesian", - "italian", - "japanese", - "kazakh", - "nepali", - "norwegian", - "portuguese", - "romanian", - "russian", - "slovene", - "spanish", - "swedish", - "tajik", - "turkish", - ], - "title": "Language", - "type": "string", - }, "PayloadSchemaType": { "description": "All possible names of payload types", "enum": ["keyword", "integer", "float", "geo", "text", "bool", "datetime", "uuid"], "title": "PayloadSchemaType", "type": "string", }, - "Snowball": {"enum": ["snowball"], "title": "Snowball", "type": "string"}, - "SnowballLanguage": { - "description": "Languages supported by snowball stemmer.", - "enum": [ - "arabic", - "armenian", - "danish", - "dutch", - "english", - "finnish", - "french", - "german", - "greek", - "hungarian", - "italian", - "norwegian", - "portuguese", - "romanian", - "russian", - "spanish", - "swedish", - "tamil", - "turkish", - ], - "title": "SnowballLanguage", - "type": "string", - }, - "SnowballParams": { - 
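
# [editor's note] Two new collection-level knobs from the schemas above:
# `wal_retain_closed` (number of closed WAL segments to retain) and the
# strict-mode `max_payload_index_count`. A hedged sketch; the values are
# illustrative, not recommendations.
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")
client.create_collection(
    collection_name="docs",
    vectors_config=models.VectorParams(size=4, distance=models.Distance.COSINE),
    wal_config=models.WalConfigDiff(wal_retain_closed=2),
    strict_mode_config=models.StrictModeConfig(max_payload_index_count=16),
)
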
"additionalProperties": False, - "properties": { - "type": {"$ref": "#/$defs/Snowball", "description": ""}, - "language": {"$ref": "#/$defs/SnowballLanguage", "description": ""}, - }, - "required": ["type", "language"], - "title": "SnowballParams", - "type": "object", - }, - "StopwordsSet": { - "additionalProperties": False, - "properties": { - "languages": { - "anyOf": [ - {"items": {"$ref": "#/$defs/Language"}, "type": "array"}, - {"type": "null"}, - ], - "default": None, - "description": "", - "title": "Languages", - }, - "custom": { - "anyOf": [{"items": {"type": "string"}, "type": "array"}, {"type": "null"}], - "default": None, - "description": "", - "title": "Custom", - }, - }, - "title": "StopwordsSet", - "type": "object", - }, "TextIndexParams": { "additionalProperties": False, "properties": { @@ -2452,6 +2596,12 @@ "description": "If true, lowercase all tokens. Default: true.", "title": "Lowercase", }, + "ascii_folding": { + "anyOf": [{"type": "boolean"}, {"type": "null"}], + "default": None, + "description": "If true, normalize tokens by folding accented characters to ASCII (e.g., 'ação' -> 'acao'). Default: false.", + "title": "Ascii Folding", + }, "phrase_matching": { "anyOf": [{"type": "boolean"}, {"type": "null"}], "default": None, @@ -2485,11 +2635,6 @@ "type": "object", }, "TextIndexType": {"enum": ["text"], "title": "TextIndexType", "type": "string"}, - "TokenizerType": { - "enum": ["prefix", "whitespace", "word", "multilingual"], - "title": "TokenizerType", - "type": "string", - }, "UuidIndexParams": { "additionalProperties": False, "properties": { @@ -2512,6 +2657,23 @@ "type": "object", }, "UuidIndexType": {"enum": ["uuid"], "title": "UuidIndexType", "type": "string"}, + "ReplicaState": { + "description": "State of the single shard within a replica set.", + "enum": [ + "Active", + "Dead", + "Partial", + "Initializing", + "Listener", + "PartialSnapshot", + "Recovery", + "Resharding", + "ReshardingScaleDown", + "ActiveRead", + ], + "title": "ReplicaState", + "type": "string", + }, "CreateShardingKey": { "additionalProperties": False, "properties": { @@ -2538,6 +2700,11 @@ "description": "Placement of shards for this key List of peer ids, that can be used to place shards for this key If not specified, will be randomly placed among all peers", "title": "Placement", }, + "initial_state": { + "anyOf": [{"$ref": "#/$defs/ReplicaState"}, {"type": "null"}], + "default": None, + "description": "Initial state of the shards for this key If not specified, will be `Initializing` first and then `Active` Warning: do not change this unless you know what you are doing", + }, }, "required": ["shard_key"], "title": "CreateShardingKey", @@ -2556,7 +2723,13 @@ "points": { "anyOf": [ { - "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, + "items": { + "anyOf": [ + {"type": "integer"}, + {"type": "string"}, + {"format": "uuid", "type": "string"}, + ] + }, "type": "array", }, {"type": "null"}, @@ -2578,6 +2751,7 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, @@ -2595,7 +2769,13 @@ "points": { "anyOf": [ { - "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, + "items": { + "anyOf": [ + {"type": "integer"}, + {"type": "string"}, + {"format": "uuid", "type": "string"}, + ] + }, "type": "array", }, {"type": "null"}, @@ -2623,6 +2803,7 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": 
"#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, @@ -2644,6 +2825,7 @@ {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"$ref": "#/$defs/Document"}, {"$ref": "#/$defs/Image"}, {"$ref": "#/$defs/InferenceObject"}, @@ -2664,6 +2846,26 @@ "title": "DiscoverInput", "type": "object", }, + "AcornSearchParams": { + "additionalProperties": False, + "description": "ACORN-related search parameters", + "properties": { + "enable": { + "anyOf": [{"type": "boolean"}, {"type": "null"}], + "default": False, + "description": "If true, then ACORN may be used for the HNSW search based on filters selectivity. Improves search recall for searches with multiple low-selectivity payload filters, at cost of performance.", + "title": "Enable", + }, + "max_selectivity": { + "anyOf": [{"type": "number"}, {"type": "null"}], + "default": None, + "description": "Maximum selectivity of filters to enable ACORN. If estimated filters selectivity is higher than this value, ACORN will not be used. Selectivity is estimated as: `estimated number of points satisfying the filters / total number of points`. 0.0 for never, 1.0 for always. Default is 0.4.", + "title": "Max Selectivity", + }, + }, + "title": "AcornSearchParams", + "type": "object", + }, "ContextExamplePair": { "additionalProperties": False, "properties": { @@ -2671,6 +2873,7 @@ "anyOf": [ {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"items": {"type": "number"}, "type": "array"}, {"$ref": "#/$defs/SparseVector"}, ], @@ -2681,6 +2884,7 @@ "anyOf": [ {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"items": {"type": "number"}, "type": "array"}, {"$ref": "#/$defs/SparseVector"}, ], @@ -2715,6 +2919,7 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, @@ -2807,6 +3012,11 @@ "description": "If enabled, the engine will only perform search among indexed or small segments. Using this option prevents slow searches in case of delayed index, but does not guarantee that all uploaded vectors will be included in search results", "title": "Indexed Only", }, + "acorn": { + "anyOf": [{"$ref": "#/$defs/AcornSearchParams"}, {"type": "null"}], + "default": None, + "description": "ACORN search params", + }, }, "title": "SearchParams", "type": "object", @@ -2823,6 +3033,7 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, @@ -2833,6 +3044,7 @@ "anyOf": [ {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"items": {"type": "number"}, "type": "array"}, {"$ref": "#/$defs/SparseVector"}, {"type": "null"}, @@ -2933,7 +3145,7 @@ "type": "object", }, "Fusion": { - "description": "Fusion algorithm allows to combine results of multiple prefetches. Available fusion algorithms: * `rrf` - Reciprocal Rank Fusion * `dbsf` - Distribution-Based Score Fusion", + "description": "Fusion algorithm allows to combine results of multiple prefetches. 
Available fusion algorithms: * `rrf` - Reciprocal Rank Fusion (with default parameters) * `dbsf` - Distribution-Based Score Fusion", "enum": ["rrf", "dbsf"], "title": "Fusion", "type": "string", @@ -3045,7 +3257,13 @@ "points": { "anyOf": [ { - "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, + "items": { + "anyOf": [ + {"type": "integer"}, + {"type": "string"}, + {"format": "uuid", "type": "string"}, + ] + }, "type": "array", }, {"type": "null"}, @@ -3067,6 +3285,7 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, @@ -3089,7 +3308,13 @@ "properties": { "ids": { "description": "", - "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, + "items": { + "anyOf": [ + {"type": "integer"}, + {"type": "string"}, + {"format": "uuid", "type": "string"}, + ] + }, "title": "Ids", "type": "array", }, @@ -3147,7 +3372,11 @@ "additionalProperties": False, "properties": { "id": { - "anyOf": [{"type": "integer"}, {"type": "string"}], + "anyOf": [ + {"type": "integer"}, + {"type": "string"}, + {"format": "uuid", "type": "string"}, + ], "description": "", "title": "Id", }, @@ -3274,6 +3503,7 @@ {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"$ref": "#/$defs/Document"}, {"$ref": "#/$defs/Image"}, {"$ref": "#/$defs/InferenceObject"}, @@ -3324,6 +3554,7 @@ {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"$ref": "#/$defs/Document"}, {"$ref": "#/$defs/Image"}, {"$ref": "#/$defs/InferenceObject"}, @@ -3333,6 +3564,7 @@ {"$ref": "#/$defs/ContextQuery"}, {"$ref": "#/$defs/OrderByQuery"}, {"$ref": "#/$defs/FusionQuery"}, + {"$ref": "#/$defs/RrfQuery"}, {"$ref": "#/$defs/FormulaQuery"}, {"$ref": "#/$defs/SampleQuery"}, {"type": "null"}, @@ -3394,6 +3626,7 @@ }, {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"$ref": "#/$defs/Document"}, {"$ref": "#/$defs/Image"}, {"$ref": "#/$defs/InferenceObject"}, @@ -3420,6 +3653,7 @@ }, {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"$ref": "#/$defs/Document"}, {"$ref": "#/$defs/Image"}, {"$ref": "#/$defs/InferenceObject"}, @@ -3455,6 +3689,27 @@ "title": "RecommendStrategy", "type": "string", }, + "Rrf": { + "additionalProperties": False, + "description": "Parameters for Reciprocal Rank Fusion", + "properties": { + "k": { + "anyOf": [{"type": "integer"}, {"type": "null"}], + "default": None, + "description": "K parameter for reciprocal rank fusion", + "title": "K", + } + }, + "title": "Rrf", + "type": "object", + }, + "RrfQuery": { + "additionalProperties": False, + "properties": {"rrf": {"$ref": "#/$defs/Rrf", "description": ""}}, + "required": ["rrf"], + "title": "RrfQuery", + "type": "object", + }, "Sample": {"enum": ["random"], "title": "Sample", "type": "string"}, "SampleQuery": { "additionalProperties": False, @@ -3509,6 +3764,7 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, @@ -3532,6 +3788,7 @@ {"items": {"items": {"type": "number"}, "type": "array"}, "type": "array"}, {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"$ref": "#/$defs/Document"}, {"$ref": "#/$defs/Image"}, {"$ref": 
"#/$defs/InferenceObject"}, @@ -3541,6 +3798,7 @@ {"$ref": "#/$defs/ContextQuery"}, {"$ref": "#/$defs/OrderByQuery"}, {"$ref": "#/$defs/FusionQuery"}, + {"$ref": "#/$defs/RrfQuery"}, {"$ref": "#/$defs/FormulaQuery"}, {"$ref": "#/$defs/SampleQuery"}, {"type": "null"}, @@ -3626,6 +3884,7 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, @@ -3639,6 +3898,7 @@ "anyOf": [ {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"items": {"type": "number"}, "type": "array"}, {"$ref": "#/$defs/SparseVector"}, ] @@ -3658,6 +3918,7 @@ "anyOf": [ {"type": "integer"}, {"type": "string"}, + {"format": "uuid", "type": "string"}, {"items": {"type": "number"}, "type": "array"}, {"$ref": "#/$defs/SparseVector"}, ] @@ -3740,6 +4001,29 @@ "title": "RecommendRequest", "type": "object", }, + "ReplicatePoints": { + "additionalProperties": False, + "properties": { + "filter": { + "anyOf": [{"$ref": "#/$defs/Filter"}, {"type": "null"}], + "default": None, + "description": "", + }, + "from_shard_key": { + "anyOf": [{"type": "integer"}, {"type": "string"}], + "description": "", + "title": "From Shard Key", + }, + "to_shard_key": { + "anyOf": [{"type": "integer"}, {"type": "string"}], + "description": "", + "title": "To Shard Key", + }, + }, + "required": ["from_shard_key", "to_shard_key"], + "title": "ReplicatePoints", + "type": "object", + }, "ReplicateShard": { "additionalProperties": False, "properties": { @@ -3826,6 +4110,7 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, @@ -4054,7 +4339,11 @@ "additionalProperties": False, "properties": { "id": { - "anyOf": [{"type": "integer"}, {"type": "string"}], + "anyOf": [ + {"type": "integer"}, + {"type": "string"}, + {"format": "uuid", "type": "string"}, + ], "description": "", "title": "Id", }, @@ -4102,12 +4391,18 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, "description": "", "title": "Shard Key", }, + "update_filter": { + "anyOf": [{"$ref": "#/$defs/Filter"}, {"type": "null"}], + "default": None, + "description": "If specified, only points that match this filter will be updated, others will be inserted", + }, }, "required": ["batch"], "title": "PointsBatch", @@ -4130,12 +4425,18 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, "description": "", "title": "Shard Key", }, + "update_filter": { + "anyOf": [{"$ref": "#/$defs/Filter"}, {"type": "null"}], + "default": None, + "description": "If specified, only points that match this filter will be updated, others will be inserted", + }, }, "required": ["points"], "title": "PointsList", @@ -4165,12 +4466,18 @@ "items": {"anyOf": [{"type": "integer"}, {"type": "string"}]}, "type": "array", }, + {"$ref": "#/$defs/ShardKeyWithFallback"}, {"type": "null"}, ], "default": None, "description": "", "title": "Shard Key", }, + "update_filter": { + "anyOf": [{"$ref": "#/$defs/Filter"}, {"type": "null"}], + "default": None, + "description": "", + }, }, "required": ["points"], "title": "UpdateVectors", diff --git a/qdrant_client/grpc/collections_pb2.py b/qdrant_client/grpc/collections_pb2.py index c01e1b8a..691eff9d 100644 --- 
a/qdrant_client/grpc/collections_pb2.py +++ b/qdrant_client/grpc/collections_pb2.py @@ -1,1074 +1,266 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: collections.proto +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" -from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() +from . import json_with_int_pb2 as json__with__int__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x11\x63ollections.proto\x12\x06qdrant\"\x83\x03\n\x0cVectorParams\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\"\n\x08\x64istance\x18\x02 \x01(\x0e\x32\x10.qdrant.Distance\x12\x30\n\x0bhnsw_config\x18\x03 \x01(\x0b\x32\x16.qdrant.HnswConfigDiffH\x00\x88\x01\x01\x12<\n\x13quantization_config\x18\x04 \x01(\x0b\x32\x1a.qdrant.QuantizationConfigH\x01\x88\x01\x01\x12\x14\n\x07on_disk\x18\x05 \x01(\x08H\x02\x88\x01\x01\x12\'\n\x08\x64\x61tatype\x18\x06 \x01(\x0e\x32\x10.qdrant.DatatypeH\x03\x88\x01\x01\x12:\n\x12multivector_config\x18\x07 \x01(\x0b\x32\x19.qdrant.MultiVectorConfigH\x04\x88\x01\x01\x42\x0e\n\x0c_hnsw_configB\x16\n\x14_quantization_configB\n\n\x08_on_diskB\x0b\n\t_datatypeB\x15\n\x13_multivector_config\"\xd0\x01\n\x10VectorParamsDiff\x12\x30\n\x0bhnsw_config\x18\x01 \x01(\x0b\x32\x16.qdrant.HnswConfigDiffH\x00\x88\x01\x01\x12@\n\x13quantization_config\x18\x02 \x01(\x0b\x32\x1e.qdrant.QuantizationConfigDiffH\x01\x88\x01\x01\x12\x14\n\x07on_disk\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\x0e\n\x0c_hnsw_configB\x16\n\x14_quantization_configB\n\n\x08_on_disk\"\x82\x01\n\x0fVectorParamsMap\x12-\n\x03map\x18\x01 \x03(\x0b\x32 .qdrant.VectorParamsMap.MapEntry\x1a@\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.qdrant.VectorParams:\x02\x38\x01\"\x8e\x01\n\x13VectorParamsDiffMap\x12\x31\n\x03map\x18\x01 \x03(\x0b\x32$.qdrant.VectorParamsDiffMap.MapEntry\x1a\x44\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.qdrant.VectorParamsDiff:\x02\x38\x01\"p\n\rVectorsConfig\x12&\n\x06params\x18\x01 \x01(\x0b\x32\x14.qdrant.VectorParamsH\x00\x12-\n\nparams_map\x18\x02 \x01(\x0b\x32\x17.qdrant.VectorParamsMapH\x00\x42\x08\n\x06\x63onfig\"|\n\x11VectorsConfigDiff\x12*\n\x06params\x18\x01 \x01(\x0b\x32\x18.qdrant.VectorParamsDiffH\x00\x12\x31\n\nparams_map\x18\x02 \x01(\x0b\x32\x1b.qdrant.VectorParamsDiffMapH\x00\x42\x08\n\x06\x63onfig\"\x83\x01\n\x12SparseVectorParams\x12-\n\x05index\x18\x01 \x01(\x0b\x32\x19.qdrant.SparseIndexConfigH\x00\x88\x01\x01\x12\'\n\x08modifier\x18\x02 \x01(\x0e\x32\x10.qdrant.ModifierH\x01\x88\x01\x01\x42\x08\n\x06_indexB\x0b\n\t_modifier\"\x8e\x01\n\x12SparseVectorConfig\x12\x30\n\x03map\x18\x01 \x03(\x0b\x32#.qdrant.SparseVectorConfig.MapEntry\x1a\x46\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.qdrant.SparseVectorParams:\x02\x38\x01\"F\n\x11MultiVectorConfig\x12\x31\n\ncomparator\x18\x01 \x01(\x0e\x32\x1d.qdrant.MultiVectorComparator\"3\n\x18GetCollectionInfoRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\"2\n\x17\x43ollectionExistsRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 
\x01(\t\"\"\n\x10\x43ollectionExists\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\"R\n\x18\x43ollectionExistsResponse\x12(\n\x06result\x18\x01 \x01(\x0b\x32\x18.qdrant.CollectionExists\x12\x0c\n\x04time\x18\x02 \x01(\x01\"\x18\n\x16ListCollectionsRequest\"%\n\x15\x43ollectionDescription\x12\x0c\n\x04name\x18\x01 \x01(\t\"Q\n\x19GetCollectionInfoResponse\x12&\n\x06result\x18\x01 \x01(\x0b\x32\x16.qdrant.CollectionInfo\x12\x0c\n\x04time\x18\x02 \x01(\x01\"[\n\x17ListCollectionsResponse\x12\x32\n\x0b\x63ollections\x18\x01 \x03(\x0b\x32\x1d.qdrant.CollectionDescription\x12\x0c\n\x04time\x18\x02 \x01(\x01\"\x84\x01\n\x16MaxOptimizationThreads\x12\x0f\n\x05value\x18\x01 \x01(\x04H\x00\x12\x39\n\x07setting\x18\x02 \x01(\x0e\x32&.qdrant.MaxOptimizationThreads.SettingH\x00\"\x13\n\x07Setting\x12\x08\n\x04\x41uto\x10\x00\x42\t\n\x07variant\",\n\x0fOptimizerStatus\x12\n\n\x02ok\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"\x90\x02\n\x0eHnswConfigDiff\x12\x0e\n\x01m\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x19\n\x0c\x65\x66_construct\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12 \n\x13\x66ull_scan_threshold\x18\x03 \x01(\x04H\x02\x88\x01\x01\x12!\n\x14max_indexing_threads\x18\x04 \x01(\x04H\x03\x88\x01\x01\x12\x14\n\x07on_disk\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x16\n\tpayload_m\x18\x06 \x01(\x04H\x05\x88\x01\x01\x42\x04\n\x02_mB\x0f\n\r_ef_constructB\x16\n\x14_full_scan_thresholdB\x17\n\x15_max_indexing_threadsB\n\n\x08_on_diskB\x0c\n\n_payload_m\"\xa5\x01\n\x11SparseIndexConfig\x12 \n\x13\x66ull_scan_threshold\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x14\n\x07on_disk\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\'\n\x08\x64\x61tatype\x18\x03 \x01(\x0e\x32\x10.qdrant.DatatypeH\x02\x88\x01\x01\x42\x16\n\x14_full_scan_thresholdB\n\n\x08_on_diskB\x0b\n\t_datatype\"y\n\rWalConfigDiff\x12\x1c\n\x0fwal_capacity_mb\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x1f\n\x12wal_segments_ahead\x18\x02 \x01(\x04H\x01\x88\x01\x01\x42\x12\n\x10_wal_capacity_mbB\x15\n\x13_wal_segments_ahead\"\xe6\x04\n\x14OptimizersConfigDiff\x12\x1e\n\x11\x64\x65leted_threshold\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12%\n\x18vacuum_min_vector_number\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12#\n\x16\x64\x65\x66\x61ult_segment_number\x18\x03 \x01(\x04H\x02\x88\x01\x01\x12\x1d\n\x10max_segment_size\x18\x04 \x01(\x04H\x03\x88\x01\x01\x12\x1d\n\x10memmap_threshold\x18\x05 \x01(\x04H\x04\x88\x01\x01\x12\x1f\n\x12indexing_threshold\x18\x06 \x01(\x04H\x05\x88\x01\x01\x12\x1f\n\x12\x66lush_interval_sec\x18\x07 \x01(\x04H\x06\x88\x01\x01\x12\x30\n#deprecated_max_optimization_threads\x18\x08 \x01(\x04H\x07\x88\x01\x01\x12\x45\n\x18max_optimization_threads\x18\t \x01(\x0b\x32\x1e.qdrant.MaxOptimizationThreadsH\x08\x88\x01\x01\x42\x14\n\x12_deleted_thresholdB\x1b\n\x19_vacuum_min_vector_numberB\x19\n\x17_default_segment_numberB\x13\n\x11_max_segment_sizeB\x13\n\x11_memmap_thresholdB\x15\n\x13_indexing_thresholdB\x15\n\x13_flush_interval_secB&\n$_deprecated_max_optimization_threadsB\x1b\n\x19_max_optimization_threads\"\x88\x01\n\x12ScalarQuantization\x12&\n\x04type\x18\x01 \x01(\x0e\x32\x18.qdrant.QuantizationType\x12\x15\n\x08quantile\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x17\n\nalways_ram\x18\x03 \x01(\x08H\x01\x88\x01\x01\x42\x0b\n\t_quantileB\r\n\x0b_always_ram\"l\n\x13ProductQuantization\x12-\n\x0b\x63ompression\x18\x01 \x01(\x0e\x32\x18.qdrant.CompressionRatio\x12\x17\n\nalways_ram\x18\x02 \x01(\x08H\x00\x88\x01\x01\x42\r\n\x0b_always_ram\"\xb6\x01\n\x1f\x42inaryQuantizationQueryEncoding\x12\x42\n\x07setting\x18\x04 
\x01(\x0e\x32/.qdrant.BinaryQuantizationQueryEncoding.SettingH\x00\"D\n\x07Setting\x12\x0b\n\x07\x44\x65\x66\x61ult\x10\x00\x12\n\n\x06\x42inary\x10\x01\x12\x0f\n\x0bScalar4Bits\x10\x02\x12\x0f\n\x0bScalar8Bits\x10\x03\x42\t\n\x07variant\"\xdd\x01\n\x12\x42inaryQuantization\x12\x17\n\nalways_ram\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x39\n\x08\x65ncoding\x18\x02 \x01(\x0e\x32\".qdrant.BinaryQuantizationEncodingH\x01\x88\x01\x01\x12\x44\n\x0equery_encoding\x18\x03 \x01(\x0b\x32\'.qdrant.BinaryQuantizationQueryEncodingH\x02\x88\x01\x01\x42\r\n\x0b_always_ramB\x0b\n\t_encodingB\x11\n\x0f_query_encoding\"\xb0\x01\n\x12QuantizationConfig\x12,\n\x06scalar\x18\x01 \x01(\x0b\x32\x1a.qdrant.ScalarQuantizationH\x00\x12.\n\x07product\x18\x02 \x01(\x0b\x32\x1b.qdrant.ProductQuantizationH\x00\x12,\n\x06\x62inary\x18\x03 \x01(\x0b\x32\x1a.qdrant.BinaryQuantizationH\x00\x42\x0e\n\x0cquantization\"\n\n\x08\x44isabled\"\xda\x01\n\x16QuantizationConfigDiff\x12,\n\x06scalar\x18\x01 \x01(\x0b\x32\x1a.qdrant.ScalarQuantizationH\x00\x12.\n\x07product\x18\x02 \x01(\x0b\x32\x1b.qdrant.ProductQuantizationH\x00\x12$\n\x08\x64isabled\x18\x03 \x01(\x0b\x32\x10.qdrant.DisabledH\x00\x12,\n\x06\x62inary\x18\x04 \x01(\x0b\x32\x1a.qdrant.BinaryQuantizationH\x00\x42\x0e\n\x0cquantization\"\xf7\x08\n\x10StrictModeConfig\x12\x14\n\x07\x65nabled\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x1c\n\x0fmax_query_limit\x18\x02 \x01(\rH\x01\x88\x01\x01\x12\x18\n\x0bmax_timeout\x18\x03 \x01(\rH\x02\x88\x01\x01\x12)\n\x1cunindexed_filtering_retrieve\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\'\n\x1aunindexed_filtering_update\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x1f\n\x12search_max_hnsw_ef\x18\x06 \x01(\rH\x05\x88\x01\x01\x12\x1f\n\x12search_allow_exact\x18\x07 \x01(\x08H\x06\x88\x01\x01\x12$\n\x17search_max_oversampling\x18\x08 \x01(\x02H\x07\x88\x01\x01\x12!\n\x14upsert_max_batchsize\x18\t \x01(\x04H\x08\x88\x01\x01\x12-\n max_collection_vector_size_bytes\x18\n \x01(\x04H\t\x88\x01\x01\x12\x1c\n\x0fread_rate_limit\x18\x0b \x01(\rH\n\x88\x01\x01\x12\x1d\n\x10write_rate_limit\x18\x0c \x01(\rH\x0b\x88\x01\x01\x12.\n!max_collection_payload_size_bytes\x18\r \x01(\x04H\x0c\x88\x01\x01\x12\"\n\x15\x66ilter_max_conditions\x18\x0e \x01(\x04H\r\x88\x01\x01\x12\x1f\n\x12\x63ondition_max_size\x18\x0f \x01(\x04H\x0e\x88\x01\x01\x12\x44\n\x12multivector_config\x18\x10 \x01(\x0b\x32#.qdrant.StrictModeMultivectorConfigH\x0f\x88\x01\x01\x12:\n\rsparse_config\x18\x11 \x01(\x0b\x32\x1e.qdrant.StrictModeSparseConfigH\x10\x88\x01\x01\x12\x1d\n\x10max_points_count\x18\x12 \x01(\x04H\x11\x88\x01\x01\x42\n\n\x08_enabledB\x12\n\x10_max_query_limitB\x0e\n\x0c_max_timeoutB\x1f\n\x1d_unindexed_filtering_retrieveB\x1d\n\x1b_unindexed_filtering_updateB\x15\n\x13_search_max_hnsw_efB\x15\n\x13_search_allow_exactB\x1a\n\x18_search_max_oversamplingB\x17\n\x15_upsert_max_batchsizeB#\n!_max_collection_vector_size_bytesB\x12\n\x10_read_rate_limitB\x13\n\x11_write_rate_limitB$\n\"_max_collection_payload_size_bytesB\x18\n\x16_filter_max_conditionsB\x15\n\x13_condition_max_sizeB\x15\n\x13_multivector_configB\x10\n\x0e_sparse_configB\x13\n\x11_max_points_count\"\xb0\x01\n\x16StrictModeSparseConfig\x12G\n\rsparse_config\x18\x01 \x03(\x0b\x32\x30.qdrant.StrictModeSparseConfig.SparseConfigEntry\x1aM\n\x11SparseConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.qdrant.StrictModeSparse:\x02\x38\x01\":\n\x10StrictModeSparse\x12\x17\n\nmax_length\x18\n 
\x01(\x04H\x00\x88\x01\x01\x42\r\n\x0b_max_length\"\xce\x01\n\x1bStrictModeMultivectorConfig\x12V\n\x12multivector_config\x18\x01 \x03(\x0b\x32:.qdrant.StrictModeMultivectorConfig.MultivectorConfigEntry\x1aW\n\x16MultivectorConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12,\n\x05value\x18\x02 \x01(\x0b\x32\x1d.qdrant.StrictModeMultivector:\x02\x38\x01\"A\n\x15StrictModeMultivector\x12\x18\n\x0bmax_vectors\x18\x01 \x01(\x04H\x00\x88\x01\x01\x42\x0e\n\x0c_max_vectors\"\xd7\x07\n\x10\x43reateCollection\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x30\n\x0bhnsw_config\x18\x04 \x01(\x0b\x32\x16.qdrant.HnswConfigDiffH\x00\x88\x01\x01\x12.\n\nwal_config\x18\x05 \x01(\x0b\x32\x15.qdrant.WalConfigDiffH\x01\x88\x01\x01\x12<\n\x11optimizers_config\x18\x06 \x01(\x0b\x32\x1c.qdrant.OptimizersConfigDiffH\x02\x88\x01\x01\x12\x19\n\x0cshard_number\x18\x07 \x01(\rH\x03\x88\x01\x01\x12\x1c\n\x0fon_disk_payload\x18\x08 \x01(\x08H\x04\x88\x01\x01\x12\x14\n\x07timeout\x18\t \x01(\x04H\x05\x88\x01\x01\x12\x32\n\x0evectors_config\x18\n \x01(\x0b\x32\x15.qdrant.VectorsConfigH\x06\x88\x01\x01\x12\x1f\n\x12replication_factor\x18\x0b \x01(\rH\x07\x88\x01\x01\x12%\n\x18write_consistency_factor\x18\x0c \x01(\rH\x08\x88\x01\x01\x12!\n\x14init_from_collection\x18\r \x01(\tH\t\x88\x01\x01\x12<\n\x13quantization_config\x18\x0e \x01(\x0b\x32\x1a.qdrant.QuantizationConfigH\n\x88\x01\x01\x12\x34\n\x0fsharding_method\x18\x0f \x01(\x0e\x32\x16.qdrant.ShardingMethodH\x0b\x88\x01\x01\x12>\n\x15sparse_vectors_config\x18\x10 \x01(\x0b\x32\x1a.qdrant.SparseVectorConfigH\x0c\x88\x01\x01\x12\x39\n\x12strict_mode_config\x18\x11 \x01(\x0b\x32\x18.qdrant.StrictModeConfigH\r\x88\x01\x01\x42\x0e\n\x0c_hnsw_configB\r\n\x0b_wal_configB\x14\n\x12_optimizers_configB\x0f\n\r_shard_numberB\x12\n\x10_on_disk_payloadB\n\n\x08_timeoutB\x11\n\x0f_vectors_configB\x15\n\x13_replication_factorB\x1b\n\x19_write_consistency_factorB\x17\n\x15_init_from_collectionB\x16\n\x14_quantization_configB\x12\n\x10_sharding_methodB\x18\n\x16_sparse_vectors_configB\x15\n\x13_strict_mode_configJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"\xf2\x04\n\x10UpdateCollection\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12<\n\x11optimizers_config\x18\x02 \x01(\x0b\x32\x1c.qdrant.OptimizersConfigDiffH\x00\x88\x01\x01\x12\x14\n\x07timeout\x18\x03 \x01(\x04H\x01\x88\x01\x01\x12\x31\n\x06params\x18\x04 \x01(\x0b\x32\x1c.qdrant.CollectionParamsDiffH\x02\x88\x01\x01\x12\x30\n\x0bhnsw_config\x18\x05 \x01(\x0b\x32\x16.qdrant.HnswConfigDiffH\x03\x88\x01\x01\x12\x36\n\x0evectors_config\x18\x06 \x01(\x0b\x32\x19.qdrant.VectorsConfigDiffH\x04\x88\x01\x01\x12@\n\x13quantization_config\x18\x07 \x01(\x0b\x32\x1e.qdrant.QuantizationConfigDiffH\x05\x88\x01\x01\x12>\n\x15sparse_vectors_config\x18\x08 \x01(\x0b\x32\x1a.qdrant.SparseVectorConfigH\x06\x88\x01\x01\x12\x39\n\x12strict_mode_config\x18\t \x01(\x0b\x32\x18.qdrant.StrictModeConfigH\x07\x88\x01\x01\x42\x14\n\x12_optimizers_configB\n\n\x08_timeoutB\t\n\x07_paramsB\x0e\n\x0c_hnsw_configB\x11\n\x0f_vectors_configB\x16\n\x14_quantization_configB\x18\n\x16_sparse_vectors_configB\x15\n\x13_strict_mode_config\"M\n\x10\x44\x65leteCollection\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x14\n\x07timeout\x18\x02 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_timeout\";\n\x1b\x43ollectionOperationResponse\x12\x0e\n\x06result\x18\x01 \x01(\x08\x12\x0c\n\x04time\x18\x02 \x01(\x01\"\xee\x03\n\x10\x43ollectionParams\x12\x14\n\x0cshard_number\x18\x03 \x01(\r\x12\x17\n\x0fon_disk_payload\x18\x04 \x01(\x08\x12\x32\n\x0evectors_config\x18\x05 
\x01(\x0b\x32\x15.qdrant.VectorsConfigH\x00\x88\x01\x01\x12\x1f\n\x12replication_factor\x18\x06 \x01(\rH\x01\x88\x01\x01\x12%\n\x18write_consistency_factor\x18\x07 \x01(\rH\x02\x88\x01\x01\x12 \n\x13read_fan_out_factor\x18\x08 \x01(\rH\x03\x88\x01\x01\x12\x34\n\x0fsharding_method\x18\t \x01(\x0e\x32\x16.qdrant.ShardingMethodH\x04\x88\x01\x01\x12>\n\x15sparse_vectors_config\x18\n \x01(\x0b\x32\x1a.qdrant.SparseVectorConfigH\x05\x88\x01\x01\x42\x11\n\x0f_vectors_configB\x15\n\x13_replication_factorB\x1b\n\x19_write_consistency_factorB\x16\n\x14_read_fan_out_factorB\x12\n\x10_sharding_methodB\x18\n\x16_sparse_vectors_configJ\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03\"\xfe\x01\n\x14\x43ollectionParamsDiff\x12\x1f\n\x12replication_factor\x18\x01 \x01(\rH\x00\x88\x01\x01\x12%\n\x18write_consistency_factor\x18\x02 \x01(\rH\x01\x88\x01\x01\x12\x1c\n\x0fon_disk_payload\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12 \n\x13read_fan_out_factor\x18\x04 \x01(\rH\x03\x88\x01\x01\x42\x15\n\x13_replication_factorB\x1b\n\x19_write_consistency_factorB\x12\n\x10_on_disk_payloadB\x16\n\x14_read_fan_out_factor\"\xf4\x02\n\x10\x43ollectionConfig\x12(\n\x06params\x18\x01 \x01(\x0b\x32\x18.qdrant.CollectionParams\x12+\n\x0bhnsw_config\x18\x02 \x01(\x0b\x32\x16.qdrant.HnswConfigDiff\x12\x36\n\x10optimizer_config\x18\x03 \x01(\x0b\x32\x1c.qdrant.OptimizersConfigDiff\x12)\n\nwal_config\x18\x04 \x01(\x0b\x32\x15.qdrant.WalConfigDiff\x12<\n\x13quantization_config\x18\x05 \x01(\x0b\x32\x1a.qdrant.QuantizationConfigH\x00\x88\x01\x01\x12\x39\n\x12strict_mode_config\x18\x06 \x01(\x0b\x32\x18.qdrant.StrictModeConfigH\x01\x88\x01\x01\x42\x16\n\x14_quantization_configB\x15\n\x13_strict_mode_config\"\\\n\x12KeywordIndexParams\x12\x16\n\tis_tenant\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x14\n\x07on_disk\x18\x02 \x01(\x08H\x01\x88\x01\x01\x42\x0c\n\n_is_tenantB\n\n\x08_on_disk\"\xa0\x01\n\x12IntegerIndexParams\x12\x13\n\x06lookup\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x12\n\x05range\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x19\n\x0cis_principal\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x14\n\x07on_disk\x18\x04 \x01(\x08H\x03\x88\x01\x01\x42\t\n\x07_lookupB\x08\n\x06_rangeB\x0f\n\r_is_principalB\n\n\x08_on_disk\"`\n\x10\x46loatIndexParams\x12\x14\n\x07on_disk\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x19\n\x0cis_principal\x18\x02 \x01(\x08H\x01\x88\x01\x01\x42\n\n\x08_on_diskB\x0f\n\r_is_principal\"2\n\x0eGeoIndexParams\x12\x14\n\x07on_disk\x18\x01 \x01(\x08H\x00\x88\x01\x01\x42\n\n\x08_on_disk\"1\n\x0cStopwordsSet\x12\x11\n\tlanguages\x18\x01 \x03(\t\x12\x0e\n\x06\x63ustom\x18\x02 \x03(\t\"\x8a\x03\n\x0fTextIndexParams\x12(\n\ttokenizer\x18\x01 \x01(\x0e\x32\x15.qdrant.TokenizerType\x12\x16\n\tlowercase\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x1a\n\rmin_token_len\x18\x03 \x01(\x04H\x01\x88\x01\x01\x12\x1a\n\rmax_token_len\x18\x04 \x01(\x04H\x02\x88\x01\x01\x12\x14\n\x07on_disk\x18\x05 \x01(\x08H\x03\x88\x01\x01\x12,\n\tstopwords\x18\x06 \x01(\x0b\x32\x14.qdrant.StopwordsSetH\x04\x88\x01\x01\x12\x1c\n\x0fphrase_matching\x18\x07 \x01(\x08H\x05\x88\x01\x01\x12/\n\x07stemmer\x18\x08 \x01(\x0b\x32\x19.qdrant.StemmingAlgorithmH\x06\x88\x01\x01\x42\x0c\n\n_lowercaseB\x10\n\x0e_min_token_lenB\x10\n\x0e_max_token_lenB\n\n\x08_on_diskB\x0c\n\n_stopwordsB\x12\n\x10_phrase_matchingB\n\n\x08_stemmer\"R\n\x11StemmingAlgorithm\x12*\n\x08snowball\x18\x01 \x01(\x0b\x32\x16.qdrant.SnowballParamsH\x00\x42\x11\n\x0fstemming_params\"\"\n\x0eSnowballParams\x12\x10\n\x08language\x18\x01 \x01(\t\"3\n\x0f\x42oolIndexParams\x12\x14\n\x07on_disk\x18\x01 
\x01(\x08H\x00\x88\x01\x01\x42\n\n\x08_on_disk\"c\n\x13\x44\x61tetimeIndexParams\x12\x14\n\x07on_disk\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x19\n\x0cis_principal\x18\x02 \x01(\x08H\x01\x88\x01\x01\x42\n\n\x08_on_diskB\x0f\n\r_is_principal\"Y\n\x0fUuidIndexParams\x12\x16\n\tis_tenant\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x14\n\x07on_disk\x18\x02 \x01(\x08H\x01\x88\x01\x01\x42\x0c\n\n_is_tenantB\n\n\x08_on_disk\"\xe8\x03\n\x12PayloadIndexParams\x12:\n\x14keyword_index_params\x18\x03 \x01(\x0b\x32\x1a.qdrant.KeywordIndexParamsH\x00\x12:\n\x14integer_index_params\x18\x02 \x01(\x0b\x32\x1a.qdrant.IntegerIndexParamsH\x00\x12\x36\n\x12\x66loat_index_params\x18\x04 \x01(\x0b\x32\x18.qdrant.FloatIndexParamsH\x00\x12\x32\n\x10geo_index_params\x18\x05 \x01(\x0b\x32\x16.qdrant.GeoIndexParamsH\x00\x12\x34\n\x11text_index_params\x18\x01 \x01(\x0b\x32\x17.qdrant.TextIndexParamsH\x00\x12\x34\n\x11\x62ool_index_params\x18\x06 \x01(\x0b\x32\x17.qdrant.BoolIndexParamsH\x00\x12<\n\x15\x64\x61tetime_index_params\x18\x07 \x01(\x0b\x32\x1b.qdrant.DatetimeIndexParamsH\x00\x12\x34\n\x11uuid_index_params\x18\x08 \x01(\x0b\x32\x17.qdrant.UuidIndexParamsH\x00\x42\x0e\n\x0cindex_params\"\x9d\x01\n\x11PayloadSchemaInfo\x12,\n\tdata_type\x18\x01 \x01(\x0e\x32\x19.qdrant.PayloadSchemaType\x12/\n\x06params\x18\x02 \x01(\x0b\x32\x1a.qdrant.PayloadIndexParamsH\x00\x88\x01\x01\x12\x13\n\x06points\x18\x03 \x01(\x04H\x01\x88\x01\x01\x42\t\n\x07_paramsB\t\n\x07_points\"\xe7\x03\n\x0e\x43ollectionInfo\x12(\n\x06status\x18\x01 \x01(\x0e\x32\x18.qdrant.CollectionStatus\x12\x31\n\x10optimizer_status\x18\x02 \x01(\x0b\x32\x17.qdrant.OptimizerStatus\x12\x1a\n\rvectors_count\x18\x03 \x01(\x04H\x00\x88\x01\x01\x12\x16\n\x0esegments_count\x18\x04 \x01(\x04\x12(\n\x06\x63onfig\x18\x07 \x01(\x0b\x32\x18.qdrant.CollectionConfig\x12\x41\n\x0epayload_schema\x18\x08 \x03(\x0b\x32).qdrant.CollectionInfo.PayloadSchemaEntry\x12\x19\n\x0cpoints_count\x18\t \x01(\x04H\x01\x88\x01\x01\x12\"\n\x15indexed_vectors_count\x18\n \x01(\x04H\x02\x88\x01\x01\x1aO\n\x12PayloadSchemaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.qdrant.PayloadSchemaInfo:\x02\x38\x01\x42\x10\n\x0e_vectors_countB\x0f\n\r_points_countB\x18\n\x16_indexed_vectors_countJ\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07\"[\n\rChangeAliases\x12(\n\x07\x61\x63tions\x18\x01 \x03(\x0b\x32\x17.qdrant.AliasOperations\x12\x14\n\x07timeout\x18\x02 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_timeout\"\xa2\x01\n\x0f\x41liasOperations\x12+\n\x0c\x63reate_alias\x18\x01 \x01(\x0b\x32\x13.qdrant.CreateAliasH\x00\x12+\n\x0crename_alias\x18\x02 \x01(\x0b\x32\x13.qdrant.RenameAliasH\x00\x12+\n\x0c\x64\x65lete_alias\x18\x03 \x01(\x0b\x32\x13.qdrant.DeleteAliasH\x00\x42\x08\n\x06\x61\x63tion\":\n\x0b\x43reateAlias\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x12\n\nalias_name\x18\x02 \x01(\t\"=\n\x0bRenameAlias\x12\x16\n\x0eold_alias_name\x18\x01 \x01(\t\x12\x16\n\x0enew_alias_name\x18\x02 \x01(\t\"!\n\x0b\x44\x65leteAlias\x12\x12\n\nalias_name\x18\x01 \x01(\t\"\x14\n\x12ListAliasesRequest\"7\n\x1cListCollectionAliasesRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\"?\n\x10\x41liasDescription\x12\x12\n\nalias_name\x18\x01 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x02 \x01(\t\"N\n\x13ListAliasesResponse\x12)\n\x07\x61liases\x18\x01 \x03(\x0b\x32\x18.qdrant.AliasDescription\x12\x0c\n\x04time\x18\x02 \x01(\x01\"7\n\x1c\x43ollectionClusterInfoRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\"6\n\x08ShardKey\x12\x11\n\x07keyword\x18\x01 
\x01(\tH\x00\x12\x10\n\x06number\x18\x02 \x01(\x04H\x00\x42\x05\n\x03key\"\x95\x01\n\x0eLocalShardInfo\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x14\n\x0cpoints_count\x18\x02 \x01(\x04\x12#\n\x05state\x18\x03 \x01(\x0e\x32\x14.qdrant.ReplicaState\x12(\n\tshard_key\x18\x04 \x01(\x0b\x32\x10.qdrant.ShardKeyH\x00\x88\x01\x01\x42\x0c\n\n_shard_key\"\x91\x01\n\x0fRemoteShardInfo\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x0f\n\x07peer_id\x18\x02 \x01(\x04\x12#\n\x05state\x18\x03 \x01(\x0e\x32\x14.qdrant.ReplicaState\x12(\n\tshard_key\x18\x04 \x01(\x0b\x32\x10.qdrant.ShardKeyH\x00\x88\x01\x01\x42\x0c\n\n_shard_key\"w\n\x11ShardTransferInfo\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x18\n\x0bto_shard_id\x18\x05 \x01(\rH\x00\x88\x01\x01\x12\x0c\n\x04\x66rom\x18\x02 \x01(\x04\x12\n\n\x02to\x18\x03 \x01(\x04\x12\x0c\n\x04sync\x18\x04 \x01(\x08\x42\x0e\n\x0c_to_shard_id\"\x9b\x01\n\x0eReshardingInfo\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x0f\n\x07peer_id\x18\x02 \x01(\x04\x12(\n\tshard_key\x18\x03 \x01(\x0b\x32\x10.qdrant.ShardKeyH\x00\x88\x01\x01\x12.\n\tdirection\x18\x04 \x01(\x0e\x32\x1b.qdrant.ReshardingDirectionB\x0c\n\n_shard_key\"\x8e\x02\n\x1d\x43ollectionClusterInfoResponse\x12\x0f\n\x07peer_id\x18\x01 \x01(\x04\x12\x13\n\x0bshard_count\x18\x02 \x01(\x04\x12,\n\x0clocal_shards\x18\x03 \x03(\x0b\x32\x16.qdrant.LocalShardInfo\x12.\n\rremote_shards\x18\x04 \x03(\x0b\x32\x17.qdrant.RemoteShardInfo\x12\x32\n\x0fshard_transfers\x18\x05 \x03(\x0b\x32\x19.qdrant.ShardTransferInfo\x12\x35\n\x15resharding_operations\x18\x06 \x03(\x0b\x32\x16.qdrant.ReshardingInfo\"\xae\x01\n\tMoveShard\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x18\n\x0bto_shard_id\x18\x05 \x01(\rH\x00\x88\x01\x01\x12\x14\n\x0c\x66rom_peer_id\x18\x02 \x01(\x04\x12\x12\n\nto_peer_id\x18\x03 \x01(\x04\x12\x30\n\x06method\x18\x04 \x01(\x0e\x32\x1b.qdrant.ShardTransferMethodH\x01\x88\x01\x01\x42\x0e\n\x0c_to_shard_idB\t\n\x07_method\"\xb3\x01\n\x0eReplicateShard\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x18\n\x0bto_shard_id\x18\x05 \x01(\rH\x00\x88\x01\x01\x12\x14\n\x0c\x66rom_peer_id\x18\x02 \x01(\x04\x12\x12\n\nto_peer_id\x18\x03 \x01(\x04\x12\x30\n\x06method\x18\x04 \x01(\x0e\x32\x1b.qdrant.ShardTransferMethodH\x01\x88\x01\x01\x42\x0e\n\x0c_to_shard_idB\t\n\x07_method\"z\n\x12\x41\x62ortShardTransfer\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x18\n\x0bto_shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x12\x14\n\x0c\x66rom_peer_id\x18\x02 \x01(\x04\x12\x12\n\nto_peer_id\x18\x03 \x01(\x04\x42\x0e\n\x0c_to_shard_id\"\xa4\x01\n\x0fRestartTransfer\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x18\n\x0bto_shard_id\x18\x05 \x01(\rH\x00\x88\x01\x01\x12\x14\n\x0c\x66rom_peer_id\x18\x02 \x01(\x04\x12\x12\n\nto_peer_id\x18\x03 \x01(\x04\x12+\n\x06method\x18\x04 \x01(\x0e\x32\x1b.qdrant.ShardTransferMethodB\x0e\n\x0c_to_shard_id\",\n\x07Replica\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x0f\n\x07peer_id\x18\x02 \x01(\x04\"\xae\x01\n\x0e\x43reateShardKey\x12#\n\tshard_key\x18\x01 \x01(\x0b\x32\x10.qdrant.ShardKey\x12\x1a\n\rshards_number\x18\x02 \x01(\rH\x00\x88\x01\x01\x12\x1f\n\x12replication_factor\x18\x03 \x01(\rH\x01\x88\x01\x01\x12\x11\n\tplacement\x18\x04 \x03(\x04\x42\x10\n\x0e_shards_numberB\x15\n\x13_replication_factor\"5\n\x0e\x44\x65leteShardKey\x12#\n\tshard_key\x18\x01 \x01(\x0b\x32\x10.qdrant.ShardKey\"\xc5\x03\n#UpdateCollectionClusterSetupRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\'\n\nmove_shard\x18\x02 \x01(\x0b\x32\x11.qdrant.MoveShardH\x00\x12\x31\n\x0freplicate_shard\x18\x03 
\x01(\x0b\x32\x16.qdrant.ReplicateShardH\x00\x12\x34\n\x0e\x61\x62ort_transfer\x18\x04 \x01(\x0b\x32\x1a.qdrant.AbortShardTransferH\x00\x12\'\n\x0c\x64rop_replica\x18\x05 \x01(\x0b\x32\x0f.qdrant.ReplicaH\x00\x12\x32\n\x10\x63reate_shard_key\x18\x07 \x01(\x0b\x32\x16.qdrant.CreateShardKeyH\x00\x12\x32\n\x10\x64\x65lete_shard_key\x18\x08 \x01(\x0b\x32\x16.qdrant.DeleteShardKeyH\x00\x12\x33\n\x10restart_transfer\x18\t \x01(\x0b\x32\x17.qdrant.RestartTransferH\x00\x12\x14\n\x07timeout\x18\x06 \x01(\x04H\x01\x88\x01\x01\x42\x0b\n\toperationB\n\n\x08_timeout\"6\n$UpdateCollectionClusterSetupResponse\x12\x0e\n\x06result\x18\x01 \x01(\x08\"{\n\x15\x43reateShardKeyRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\'\n\x07request\x18\x02 \x01(\x0b\x32\x16.qdrant.CreateShardKey\x12\x14\n\x07timeout\x18\x03 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_timeout\"{\n\x15\x44\x65leteShardKeyRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\'\n\x07request\x18\x02 \x01(\x0b\x32\x16.qdrant.DeleteShardKey\x12\x14\n\x07timeout\x18\x03 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_timeout\"(\n\x16\x43reateShardKeyResponse\x12\x0e\n\x06result\x18\x01 \x01(\x08\"(\n\x16\x44\x65leteShardKeyResponse\x12\x0e\n\x06result\x18\x01 \x01(\x08*<\n\x08\x44\x61tatype\x12\x0b\n\x07\x44\x65\x66\x61ult\x10\x00\x12\x0b\n\x07\x46loat32\x10\x01\x12\t\n\x05Uint8\x10\x02\x12\x0b\n\x07\x46loat16\x10\x03*\x1d\n\x08Modifier\x12\x08\n\x04None\x10\x00\x12\x07\n\x03Idf\x10\x01*#\n\x15MultiVectorComparator\x12\n\n\x06MaxSim\x10\x00*O\n\x08\x44istance\x12\x13\n\x0fUnknownDistance\x10\x00\x12\n\n\x06\x43osine\x10\x01\x12\n\n\x06\x45uclid\x10\x02\x12\x07\n\x03\x44ot\x10\x03\x12\r\n\tManhattan\x10\x04*Y\n\x10\x43ollectionStatus\x12\x1b\n\x17UnknownCollectionStatus\x10\x00\x12\t\n\x05Green\x10\x01\x12\n\n\x06Yellow\x10\x02\x12\x07\n\x03Red\x10\x03\x12\x08\n\x04Grey\x10\x04*~\n\x11PayloadSchemaType\x12\x0f\n\x0bUnknownType\x10\x00\x12\x0b\n\x07Keyword\x10\x01\x12\x0b\n\x07Integer\x10\x02\x12\t\n\x05\x46loat\x10\x03\x12\x07\n\x03Geo\x10\x04\x12\x08\n\x04Text\x10\x05\x12\x08\n\x04\x42ool\x10\x06\x12\x0c\n\x08\x44\x61tetime\x10\x07\x12\x08\n\x04Uuid\x10\x08*5\n\x10QuantizationType\x12\x17\n\x13UnknownQuantization\x10\x00\x12\x08\n\x04Int8\x10\x01*=\n\x10\x43ompressionRatio\x12\x06\n\x02x4\x10\x00\x12\x06\n\x02x8\x10\x01\x12\x07\n\x03x16\x10\x02\x12\x07\n\x03x32\x10\x03\x12\x07\n\x03x64\x10\x04*I\n\x1a\x42inaryQuantizationEncoding\x12\n\n\x06OneBit\x10\x00\x12\x0b\n\x07TwoBits\x10\x01\x12\x12\n\x0eOneAndHalfBits\x10\x02*&\n\x0eShardingMethod\x12\x08\n\x04\x41uto\x10\x00\x12\n\n\x06\x43ustom\x10\x01*T\n\rTokenizerType\x12\x0b\n\x07Unknown\x10\x00\x12\n\n\x06Prefix\x10\x01\x12\x0e\n\nWhitespace\x10\x02\x12\x08\n\x04Word\x10\x03\x12\x10\n\x0cMultilingual\x10\x04*\x9d\x01\n\x0cReplicaState\x12\n\n\x06\x41\x63tive\x10\x00\x12\x08\n\x04\x44\x65\x61\x64\x10\x01\x12\x0b\n\x07Partial\x10\x02\x12\x10\n\x0cInitializing\x10\x03\x12\x0c\n\x08Listener\x10\x04\x12\x13\n\x0fPartialSnapshot\x10\x05\x12\x0c\n\x08Recovery\x10\x06\x12\x0e\n\nResharding\x10\x07\x12\x17\n\x13ReshardingScaleDown\x10\x08*\'\n\x13ReshardingDirection\x12\x06\n\x02Up\x10\x00\x12\x08\n\x04\x44own\x10\x01*a\n\x13ShardTransferMethod\x12\x11\n\rStreamRecords\x10\x00\x12\x0c\n\x08Snapshot\x10\x01\x12\x0c\n\x08WalDelta\x10\x02\x12\x1b\n\x17ReshardingStreamRecords\x10\x03\x42\x15\xaa\x02\x12Qdrant.Client.Grpcb\x06proto3') - -_DATATYPE = DESCRIPTOR.enum_types_by_name['Datatype'] -Datatype = enum_type_wrapper.EnumTypeWrapper(_DATATYPE) -_MODIFIER = DESCRIPTOR.enum_types_by_name['Modifier'] 
-Modifier = enum_type_wrapper.EnumTypeWrapper(_MODIFIER) -_MULTIVECTORCOMPARATOR = DESCRIPTOR.enum_types_by_name['MultiVectorComparator'] -MultiVectorComparator = enum_type_wrapper.EnumTypeWrapper(_MULTIVECTORCOMPARATOR) -_DISTANCE = DESCRIPTOR.enum_types_by_name['Distance'] -Distance = enum_type_wrapper.EnumTypeWrapper(_DISTANCE) -_COLLECTIONSTATUS = DESCRIPTOR.enum_types_by_name['CollectionStatus'] -CollectionStatus = enum_type_wrapper.EnumTypeWrapper(_COLLECTIONSTATUS) -_PAYLOADSCHEMATYPE = DESCRIPTOR.enum_types_by_name['PayloadSchemaType'] -PayloadSchemaType = enum_type_wrapper.EnumTypeWrapper(_PAYLOADSCHEMATYPE) -_QUANTIZATIONTYPE = DESCRIPTOR.enum_types_by_name['QuantizationType'] -QuantizationType = enum_type_wrapper.EnumTypeWrapper(_QUANTIZATIONTYPE) -_COMPRESSIONRATIO = DESCRIPTOR.enum_types_by_name['CompressionRatio'] -CompressionRatio = enum_type_wrapper.EnumTypeWrapper(_COMPRESSIONRATIO) -_BINARYQUANTIZATIONENCODING = DESCRIPTOR.enum_types_by_name['BinaryQuantizationEncoding'] -BinaryQuantizationEncoding = enum_type_wrapper.EnumTypeWrapper(_BINARYQUANTIZATIONENCODING) -_SHARDINGMETHOD = DESCRIPTOR.enum_types_by_name['ShardingMethod'] -ShardingMethod = enum_type_wrapper.EnumTypeWrapper(_SHARDINGMETHOD) -_TOKENIZERTYPE = DESCRIPTOR.enum_types_by_name['TokenizerType'] -TokenizerType = enum_type_wrapper.EnumTypeWrapper(_TOKENIZERTYPE) -_REPLICASTATE = DESCRIPTOR.enum_types_by_name['ReplicaState'] -ReplicaState = enum_type_wrapper.EnumTypeWrapper(_REPLICASTATE) -_RESHARDINGDIRECTION = DESCRIPTOR.enum_types_by_name['ReshardingDirection'] -ReshardingDirection = enum_type_wrapper.EnumTypeWrapper(_RESHARDINGDIRECTION) -_SHARDTRANSFERMETHOD = DESCRIPTOR.enum_types_by_name['ShardTransferMethod'] -ShardTransferMethod = enum_type_wrapper.EnumTypeWrapper(_SHARDTRANSFERMETHOD) -Default = 0 -Float32 = 1 -Uint8 = 2 -Float16 = 3 -globals()['None'] = 0 -Idf = 1 -MaxSim = 0 -UnknownDistance = 0 -Cosine = 1 -Euclid = 2 -Dot = 3 -Manhattan = 4 -UnknownCollectionStatus = 0 -Green = 1 -Yellow = 2 -Red = 3 -Grey = 4 -UnknownType = 0 -Keyword = 1 -Integer = 2 -Float = 3 -Geo = 4 -Text = 5 -Bool = 6 -Datetime = 7 -Uuid = 8 -UnknownQuantization = 0 -Int8 = 1 -x4 = 0 -x8 = 1 -x16 = 2 -x32 = 3 -x64 = 4 -OneBit = 0 -TwoBits = 1 -OneAndHalfBits = 2 -Auto = 0 -Custom = 1 -Unknown = 0 -Prefix = 1 -Whitespace = 2 -Word = 3 -Multilingual = 4 -Active = 0 -Dead = 1 -Partial = 2 -Initializing = 3 -Listener = 4 -PartialSnapshot = 5 -Recovery = 6 -Resharding = 7 -ReshardingScaleDown = 8 -Up = 0 -Down = 1 -StreamRecords = 0 -Snapshot = 1 -WalDelta = 2 -ReshardingStreamRecords = 3 - - -_VECTORPARAMS = DESCRIPTOR.message_types_by_name['VectorParams'] -_VECTORPARAMSDIFF = DESCRIPTOR.message_types_by_name['VectorParamsDiff'] -_VECTORPARAMSMAP = DESCRIPTOR.message_types_by_name['VectorParamsMap'] -_VECTORPARAMSMAP_MAPENTRY = _VECTORPARAMSMAP.nested_types_by_name['MapEntry'] -_VECTORPARAMSDIFFMAP = DESCRIPTOR.message_types_by_name['VectorParamsDiffMap'] -_VECTORPARAMSDIFFMAP_MAPENTRY = _VECTORPARAMSDIFFMAP.nested_types_by_name['MapEntry'] -_VECTORSCONFIG = DESCRIPTOR.message_types_by_name['VectorsConfig'] -_VECTORSCONFIGDIFF = DESCRIPTOR.message_types_by_name['VectorsConfigDiff'] -_SPARSEVECTORPARAMS = DESCRIPTOR.message_types_by_name['SparseVectorParams'] -_SPARSEVECTORCONFIG = DESCRIPTOR.message_types_by_name['SparseVectorConfig'] -_SPARSEVECTORCONFIG_MAPENTRY = _SPARSEVECTORCONFIG.nested_types_by_name['MapEntry'] -_MULTIVECTORCONFIG = DESCRIPTOR.message_types_by_name['MultiVectorConfig'] -_GETCOLLECTIONINFOREQUEST = 
DESCRIPTOR.message_types_by_name['GetCollectionInfoRequest'] -_COLLECTIONEXISTSREQUEST = DESCRIPTOR.message_types_by_name['CollectionExistsRequest'] -_COLLECTIONEXISTS = DESCRIPTOR.message_types_by_name['CollectionExists'] -_COLLECTIONEXISTSRESPONSE = DESCRIPTOR.message_types_by_name['CollectionExistsResponse'] -_LISTCOLLECTIONSREQUEST = DESCRIPTOR.message_types_by_name['ListCollectionsRequest'] -_COLLECTIONDESCRIPTION = DESCRIPTOR.message_types_by_name['CollectionDescription'] -_GETCOLLECTIONINFORESPONSE = DESCRIPTOR.message_types_by_name['GetCollectionInfoResponse'] -_LISTCOLLECTIONSRESPONSE = DESCRIPTOR.message_types_by_name['ListCollectionsResponse'] -_MAXOPTIMIZATIONTHREADS = DESCRIPTOR.message_types_by_name['MaxOptimizationThreads'] -_OPTIMIZERSTATUS = DESCRIPTOR.message_types_by_name['OptimizerStatus'] -_HNSWCONFIGDIFF = DESCRIPTOR.message_types_by_name['HnswConfigDiff'] -_SPARSEINDEXCONFIG = DESCRIPTOR.message_types_by_name['SparseIndexConfig'] -_WALCONFIGDIFF = DESCRIPTOR.message_types_by_name['WalConfigDiff'] -_OPTIMIZERSCONFIGDIFF = DESCRIPTOR.message_types_by_name['OptimizersConfigDiff'] -_SCALARQUANTIZATION = DESCRIPTOR.message_types_by_name['ScalarQuantization'] -_PRODUCTQUANTIZATION = DESCRIPTOR.message_types_by_name['ProductQuantization'] -_BINARYQUANTIZATIONQUERYENCODING = DESCRIPTOR.message_types_by_name['BinaryQuantizationQueryEncoding'] -_BINARYQUANTIZATION = DESCRIPTOR.message_types_by_name['BinaryQuantization'] -_QUANTIZATIONCONFIG = DESCRIPTOR.message_types_by_name['QuantizationConfig'] -_DISABLED = DESCRIPTOR.message_types_by_name['Disabled'] -_QUANTIZATIONCONFIGDIFF = DESCRIPTOR.message_types_by_name['QuantizationConfigDiff'] -_STRICTMODECONFIG = DESCRIPTOR.message_types_by_name['StrictModeConfig'] -_STRICTMODESPARSECONFIG = DESCRIPTOR.message_types_by_name['StrictModeSparseConfig'] -_STRICTMODESPARSECONFIG_SPARSECONFIGENTRY = _STRICTMODESPARSECONFIG.nested_types_by_name['SparseConfigEntry'] -_STRICTMODESPARSE = DESCRIPTOR.message_types_by_name['StrictModeSparse'] -_STRICTMODEMULTIVECTORCONFIG = DESCRIPTOR.message_types_by_name['StrictModeMultivectorConfig'] -_STRICTMODEMULTIVECTORCONFIG_MULTIVECTORCONFIGENTRY = _STRICTMODEMULTIVECTORCONFIG.nested_types_by_name['MultivectorConfigEntry'] -_STRICTMODEMULTIVECTOR = DESCRIPTOR.message_types_by_name['StrictModeMultivector'] -_CREATECOLLECTION = DESCRIPTOR.message_types_by_name['CreateCollection'] -_UPDATECOLLECTION = DESCRIPTOR.message_types_by_name['UpdateCollection'] -_DELETECOLLECTION = DESCRIPTOR.message_types_by_name['DeleteCollection'] -_COLLECTIONOPERATIONRESPONSE = DESCRIPTOR.message_types_by_name['CollectionOperationResponse'] -_COLLECTIONPARAMS = DESCRIPTOR.message_types_by_name['CollectionParams'] -_COLLECTIONPARAMSDIFF = DESCRIPTOR.message_types_by_name['CollectionParamsDiff'] -_COLLECTIONCONFIG = DESCRIPTOR.message_types_by_name['CollectionConfig'] -_KEYWORDINDEXPARAMS = DESCRIPTOR.message_types_by_name['KeywordIndexParams'] -_INTEGERINDEXPARAMS = DESCRIPTOR.message_types_by_name['IntegerIndexParams'] -_FLOATINDEXPARAMS = DESCRIPTOR.message_types_by_name['FloatIndexParams'] -_GEOINDEXPARAMS = DESCRIPTOR.message_types_by_name['GeoIndexParams'] -_STOPWORDSSET = DESCRIPTOR.message_types_by_name['StopwordsSet'] -_TEXTINDEXPARAMS = DESCRIPTOR.message_types_by_name['TextIndexParams'] -_STEMMINGALGORITHM = DESCRIPTOR.message_types_by_name['StemmingAlgorithm'] -_SNOWBALLPARAMS = DESCRIPTOR.message_types_by_name['SnowballParams'] -_BOOLINDEXPARAMS = DESCRIPTOR.message_types_by_name['BoolIndexParams'] 
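# The import hunk above swaps `_reflection`/`_message` for protobuf's `_builder`
# module, as protoc 4.x output does. Accordingly, this whole run of
# `message_types_by_name` lookups and per-message `GeneratedProtocolMessageType`
# registrations removed below collapses, in the regenerated file, to two builder
# calls along these lines (a sketch of typical protoc 4.x output, not copied
# from this diff):
#
#   _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
#   _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'collections_pb2', globals())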
-_DATETIMEINDEXPARAMS = DESCRIPTOR.message_types_by_name['DatetimeIndexParams'] -_UUIDINDEXPARAMS = DESCRIPTOR.message_types_by_name['UuidIndexParams'] -_PAYLOADINDEXPARAMS = DESCRIPTOR.message_types_by_name['PayloadIndexParams'] -_PAYLOADSCHEMAINFO = DESCRIPTOR.message_types_by_name['PayloadSchemaInfo'] -_COLLECTIONINFO = DESCRIPTOR.message_types_by_name['CollectionInfo'] -_COLLECTIONINFO_PAYLOADSCHEMAENTRY = _COLLECTIONINFO.nested_types_by_name['PayloadSchemaEntry'] -_CHANGEALIASES = DESCRIPTOR.message_types_by_name['ChangeAliases'] -_ALIASOPERATIONS = DESCRIPTOR.message_types_by_name['AliasOperations'] -_CREATEALIAS = DESCRIPTOR.message_types_by_name['CreateAlias'] -_RENAMEALIAS = DESCRIPTOR.message_types_by_name['RenameAlias'] -_DELETEALIAS = DESCRIPTOR.message_types_by_name['DeleteAlias'] -_LISTALIASESREQUEST = DESCRIPTOR.message_types_by_name['ListAliasesRequest'] -_LISTCOLLECTIONALIASESREQUEST = DESCRIPTOR.message_types_by_name['ListCollectionAliasesRequest'] -_ALIASDESCRIPTION = DESCRIPTOR.message_types_by_name['AliasDescription'] -_LISTALIASESRESPONSE = DESCRIPTOR.message_types_by_name['ListAliasesResponse'] -_COLLECTIONCLUSTERINFOREQUEST = DESCRIPTOR.message_types_by_name['CollectionClusterInfoRequest'] -_SHARDKEY = DESCRIPTOR.message_types_by_name['ShardKey'] -_LOCALSHARDINFO = DESCRIPTOR.message_types_by_name['LocalShardInfo'] -_REMOTESHARDINFO = DESCRIPTOR.message_types_by_name['RemoteShardInfo'] -_SHARDTRANSFERINFO = DESCRIPTOR.message_types_by_name['ShardTransferInfo'] -_RESHARDINGINFO = DESCRIPTOR.message_types_by_name['ReshardingInfo'] -_COLLECTIONCLUSTERINFORESPONSE = DESCRIPTOR.message_types_by_name['CollectionClusterInfoResponse'] -_MOVESHARD = DESCRIPTOR.message_types_by_name['MoveShard'] -_REPLICATESHARD = DESCRIPTOR.message_types_by_name['ReplicateShard'] -_ABORTSHARDTRANSFER = DESCRIPTOR.message_types_by_name['AbortShardTransfer'] -_RESTARTTRANSFER = DESCRIPTOR.message_types_by_name['RestartTransfer'] -_REPLICA = DESCRIPTOR.message_types_by_name['Replica'] -_CREATESHARDKEY = DESCRIPTOR.message_types_by_name['CreateShardKey'] -_DELETESHARDKEY = DESCRIPTOR.message_types_by_name['DeleteShardKey'] -_UPDATECOLLECTIONCLUSTERSETUPREQUEST = DESCRIPTOR.message_types_by_name['UpdateCollectionClusterSetupRequest'] -_UPDATECOLLECTIONCLUSTERSETUPRESPONSE = DESCRIPTOR.message_types_by_name['UpdateCollectionClusterSetupResponse'] -_CREATESHARDKEYREQUEST = DESCRIPTOR.message_types_by_name['CreateShardKeyRequest'] -_DELETESHARDKEYREQUEST = DESCRIPTOR.message_types_by_name['DeleteShardKeyRequest'] -_CREATESHARDKEYRESPONSE = DESCRIPTOR.message_types_by_name['CreateShardKeyResponse'] -_DELETESHARDKEYRESPONSE = DESCRIPTOR.message_types_by_name['DeleteShardKeyResponse'] -_MAXOPTIMIZATIONTHREADS_SETTING = _MAXOPTIMIZATIONTHREADS.enum_types_by_name['Setting'] -_BINARYQUANTIZATIONQUERYENCODING_SETTING = _BINARYQUANTIZATIONQUERYENCODING.enum_types_by_name['Setting'] -VectorParams = _reflection.GeneratedProtocolMessageType('VectorParams', (_message.Message,), { - 'DESCRIPTOR' : _VECTORPARAMS, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.VectorParams) - }) -_sym_db.RegisterMessage(VectorParams) - -VectorParamsDiff = _reflection.GeneratedProtocolMessageType('VectorParamsDiff', (_message.Message,), { - 'DESCRIPTOR' : _VECTORPARAMSDIFF, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.VectorParamsDiff) - }) -_sym_db.RegisterMessage(VectorParamsDiff) - -VectorParamsMap = 
_reflection.GeneratedProtocolMessageType('VectorParamsMap', (_message.Message,), { - - 'MapEntry' : _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), { - 'DESCRIPTOR' : _VECTORPARAMSMAP_MAPENTRY, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.VectorParamsMap.MapEntry) - }) - , - 'DESCRIPTOR' : _VECTORPARAMSMAP, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.VectorParamsMap) - }) -_sym_db.RegisterMessage(VectorParamsMap) -_sym_db.RegisterMessage(VectorParamsMap.MapEntry) - -VectorParamsDiffMap = _reflection.GeneratedProtocolMessageType('VectorParamsDiffMap', (_message.Message,), { - - 'MapEntry' : _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), { - 'DESCRIPTOR' : _VECTORPARAMSDIFFMAP_MAPENTRY, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.VectorParamsDiffMap.MapEntry) - }) - , - 'DESCRIPTOR' : _VECTORPARAMSDIFFMAP, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.VectorParamsDiffMap) - }) -_sym_db.RegisterMessage(VectorParamsDiffMap) -_sym_db.RegisterMessage(VectorParamsDiffMap.MapEntry) - -VectorsConfig = _reflection.GeneratedProtocolMessageType('VectorsConfig', (_message.Message,), { - 'DESCRIPTOR' : _VECTORSCONFIG, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.VectorsConfig) - }) -_sym_db.RegisterMessage(VectorsConfig) - -VectorsConfigDiff = _reflection.GeneratedProtocolMessageType('VectorsConfigDiff', (_message.Message,), { - 'DESCRIPTOR' : _VECTORSCONFIGDIFF, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.VectorsConfigDiff) - }) -_sym_db.RegisterMessage(VectorsConfigDiff) - -SparseVectorParams = _reflection.GeneratedProtocolMessageType('SparseVectorParams', (_message.Message,), { - 'DESCRIPTOR' : _SPARSEVECTORPARAMS, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.SparseVectorParams) - }) -_sym_db.RegisterMessage(SparseVectorParams) - -SparseVectorConfig = _reflection.GeneratedProtocolMessageType('SparseVectorConfig', (_message.Message,), { - - 'MapEntry' : _reflection.GeneratedProtocolMessageType('MapEntry', (_message.Message,), { - 'DESCRIPTOR' : _SPARSEVECTORCONFIG_MAPENTRY, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.SparseVectorConfig.MapEntry) - }) - , - 'DESCRIPTOR' : _SPARSEVECTORCONFIG, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.SparseVectorConfig) - }) -_sym_db.RegisterMessage(SparseVectorConfig) -_sym_db.RegisterMessage(SparseVectorConfig.MapEntry) - -MultiVectorConfig = _reflection.GeneratedProtocolMessageType('MultiVectorConfig', (_message.Message,), { - 'DESCRIPTOR' : _MULTIVECTORCONFIG, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.MultiVectorConfig) - }) -_sym_db.RegisterMessage(MultiVectorConfig) - -GetCollectionInfoRequest = _reflection.GeneratedProtocolMessageType('GetCollectionInfoRequest', (_message.Message,), { - 'DESCRIPTOR' : _GETCOLLECTIONINFOREQUEST, - '__module__' : 'collections_pb2' - # @@protoc_insertion_point(class_scope:qdrant.GetCollectionInfoRequest) - }) -_sym_db.RegisterMessage(GetCollectionInfoRequest) - -CollectionExistsRequest = _reflection.GeneratedProtocolMessageType('CollectionExistsRequest', (_message.Message,), { - 'DESCRIPTOR' : _COLLECTIONEXISTSREQUEST, - '__module__' : 'collections_pb2' - # 
@@protoc_insertion_point(class_scope:qdrant.CollectionExistsRequest)
-  })
-_sym_db.RegisterMessage(CollectionExistsRequest)
-
-CollectionExists = _reflection.GeneratedProtocolMessageType('CollectionExists', (_message.Message,), {
-  'DESCRIPTOR' : _COLLECTIONEXISTS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CollectionExists)
-  })
-_sym_db.RegisterMessage(CollectionExists)
-
-CollectionExistsResponse = _reflection.GeneratedProtocolMessageType('CollectionExistsResponse', (_message.Message,), {
-  'DESCRIPTOR' : _COLLECTIONEXISTSRESPONSE,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CollectionExistsResponse)
-  })
-_sym_db.RegisterMessage(CollectionExistsResponse)
-
-ListCollectionsRequest = _reflection.GeneratedProtocolMessageType('ListCollectionsRequest', (_message.Message,), {
-  'DESCRIPTOR' : _LISTCOLLECTIONSREQUEST,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ListCollectionsRequest)
-  })
-_sym_db.RegisterMessage(ListCollectionsRequest)
-
-CollectionDescription = _reflection.GeneratedProtocolMessageType('CollectionDescription', (_message.Message,), {
-  'DESCRIPTOR' : _COLLECTIONDESCRIPTION,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CollectionDescription)
-  })
-_sym_db.RegisterMessage(CollectionDescription)
-
-GetCollectionInfoResponse = _reflection.GeneratedProtocolMessageType('GetCollectionInfoResponse', (_message.Message,), {
-  'DESCRIPTOR' : _GETCOLLECTIONINFORESPONSE,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GetCollectionInfoResponse)
-  })
-_sym_db.RegisterMessage(GetCollectionInfoResponse)
-
-ListCollectionsResponse = _reflection.GeneratedProtocolMessageType('ListCollectionsResponse', (_message.Message,), {
-  'DESCRIPTOR' : _LISTCOLLECTIONSRESPONSE,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ListCollectionsResponse)
-  })
-_sym_db.RegisterMessage(ListCollectionsResponse)
-
-MaxOptimizationThreads = _reflection.GeneratedProtocolMessageType('MaxOptimizationThreads', (_message.Message,), {
-  'DESCRIPTOR' : _MAXOPTIMIZATIONTHREADS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.MaxOptimizationThreads)
-  })
-_sym_db.RegisterMessage(MaxOptimizationThreads)
-
-OptimizerStatus = _reflection.GeneratedProtocolMessageType('OptimizerStatus', (_message.Message,), {
-  'DESCRIPTOR' : _OPTIMIZERSTATUS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.OptimizerStatus)
-  })
-_sym_db.RegisterMessage(OptimizerStatus)
-
-HnswConfigDiff = _reflection.GeneratedProtocolMessageType('HnswConfigDiff', (_message.Message,), {
-  'DESCRIPTOR' : _HNSWCONFIGDIFF,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.HnswConfigDiff)
-  })
-_sym_db.RegisterMessage(HnswConfigDiff)
-
-SparseIndexConfig = _reflection.GeneratedProtocolMessageType('SparseIndexConfig', (_message.Message,), {
-  'DESCRIPTOR' : _SPARSEINDEXCONFIG,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SparseIndexConfig)
-  })
-_sym_db.RegisterMessage(SparseIndexConfig)
-
-WalConfigDiff = _reflection.GeneratedProtocolMessageType('WalConfigDiff', (_message.Message,), {
-  'DESCRIPTOR' : _WALCONFIGDIFF,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.WalConfigDiff)
-  })
-_sym_db.RegisterMessage(WalConfigDiff)
-
-OptimizersConfigDiff = _reflection.GeneratedProtocolMessageType('OptimizersConfigDiff', (_message.Message,), {
-  'DESCRIPTOR' : _OPTIMIZERSCONFIGDIFF,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.OptimizersConfigDiff)
-  })
-_sym_db.RegisterMessage(OptimizersConfigDiff)
-
-ScalarQuantization = _reflection.GeneratedProtocolMessageType('ScalarQuantization', (_message.Message,), {
-  'DESCRIPTOR' : _SCALARQUANTIZATION,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ScalarQuantization)
-  })
-_sym_db.RegisterMessage(ScalarQuantization)
-
-ProductQuantization = _reflection.GeneratedProtocolMessageType('ProductQuantization', (_message.Message,), {
-  'DESCRIPTOR' : _PRODUCTQUANTIZATION,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ProductQuantization)
-  })
-_sym_db.RegisterMessage(ProductQuantization)
-
-BinaryQuantizationQueryEncoding = _reflection.GeneratedProtocolMessageType('BinaryQuantizationQueryEncoding', (_message.Message,), {
-  'DESCRIPTOR' : _BINARYQUANTIZATIONQUERYENCODING,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.BinaryQuantizationQueryEncoding)
-  })
-_sym_db.RegisterMessage(BinaryQuantizationQueryEncoding)
-
-BinaryQuantization = _reflection.GeneratedProtocolMessageType('BinaryQuantization', (_message.Message,), {
-  'DESCRIPTOR' : _BINARYQUANTIZATION,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.BinaryQuantization)
-  })
-_sym_db.RegisterMessage(BinaryQuantization)
-
-QuantizationConfig = _reflection.GeneratedProtocolMessageType('QuantizationConfig', (_message.Message,), {
-  'DESCRIPTOR' : _QUANTIZATIONCONFIG,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.QuantizationConfig)
-  })
-_sym_db.RegisterMessage(QuantizationConfig)
-
-Disabled = _reflection.GeneratedProtocolMessageType('Disabled', (_message.Message,), {
-  'DESCRIPTOR' : _DISABLED,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Disabled)
-  })
-_sym_db.RegisterMessage(Disabled)
-
-QuantizationConfigDiff = _reflection.GeneratedProtocolMessageType('QuantizationConfigDiff', (_message.Message,), {
-  'DESCRIPTOR' : _QUANTIZATIONCONFIGDIFF,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.QuantizationConfigDiff)
-  })
-_sym_db.RegisterMessage(QuantizationConfigDiff)
-
-StrictModeConfig = _reflection.GeneratedProtocolMessageType('StrictModeConfig', (_message.Message,), {
-  'DESCRIPTOR' : _STRICTMODECONFIG,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.StrictModeConfig)
-  })
-_sym_db.RegisterMessage(StrictModeConfig)
-
-StrictModeSparseConfig = _reflection.GeneratedProtocolMessageType('StrictModeSparseConfig', (_message.Message,), {
-
-  'SparseConfigEntry' : _reflection.GeneratedProtocolMessageType('SparseConfigEntry', (_message.Message,), {
-    'DESCRIPTOR' : _STRICTMODESPARSECONFIG_SPARSECONFIGENTRY,
-    '__module__' : 'collections_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.StrictModeSparseConfig.SparseConfigEntry)
-    })
-  ,
-  'DESCRIPTOR' : _STRICTMODESPARSECONFIG,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.StrictModeSparseConfig)
-  })
-_sym_db.RegisterMessage(StrictModeSparseConfig)
-_sym_db.RegisterMessage(StrictModeSparseConfig.SparseConfigEntry)
-
-StrictModeSparse = _reflection.GeneratedProtocolMessageType('StrictModeSparse', (_message.Message,), {
-  'DESCRIPTOR' : _STRICTMODESPARSE,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.StrictModeSparse)
-  })
-_sym_db.RegisterMessage(StrictModeSparse)
-
-StrictModeMultivectorConfig = _reflection.GeneratedProtocolMessageType('StrictModeMultivectorConfig', (_message.Message,), {
-
-  'MultivectorConfigEntry' : _reflection.GeneratedProtocolMessageType('MultivectorConfigEntry', (_message.Message,), {
-    'DESCRIPTOR' : _STRICTMODEMULTIVECTORCONFIG_MULTIVECTORCONFIGENTRY,
-    '__module__' : 'collections_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.StrictModeMultivectorConfig.MultivectorConfigEntry)
-    })
-  ,
-  'DESCRIPTOR' : _STRICTMODEMULTIVECTORCONFIG,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.StrictModeMultivectorConfig)
-  })
-_sym_db.RegisterMessage(StrictModeMultivectorConfig)
-_sym_db.RegisterMessage(StrictModeMultivectorConfig.MultivectorConfigEntry)
-
-StrictModeMultivector = _reflection.GeneratedProtocolMessageType('StrictModeMultivector', (_message.Message,), {
-  'DESCRIPTOR' : _STRICTMODEMULTIVECTOR,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.StrictModeMultivector)
-  })
-_sym_db.RegisterMessage(StrictModeMultivector)
-
-CreateCollection = _reflection.GeneratedProtocolMessageType('CreateCollection', (_message.Message,), {
-  'DESCRIPTOR' : _CREATECOLLECTION,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CreateCollection)
-  })
-_sym_db.RegisterMessage(CreateCollection)
-
-UpdateCollection = _reflection.GeneratedProtocolMessageType('UpdateCollection', (_message.Message,), {
-  'DESCRIPTOR' : _UPDATECOLLECTION,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.UpdateCollection)
-  })
-_sym_db.RegisterMessage(UpdateCollection)
-
-DeleteCollection = _reflection.GeneratedProtocolMessageType('DeleteCollection', (_message.Message,), {
-  'DESCRIPTOR' : _DELETECOLLECTION,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DeleteCollection)
-  })
-_sym_db.RegisterMessage(DeleteCollection)
-
-CollectionOperationResponse = _reflection.GeneratedProtocolMessageType('CollectionOperationResponse', (_message.Message,), {
-  'DESCRIPTOR' : _COLLECTIONOPERATIONRESPONSE,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CollectionOperationResponse)
-  })
-_sym_db.RegisterMessage(CollectionOperationResponse)
-
-CollectionParams = _reflection.GeneratedProtocolMessageType('CollectionParams', (_message.Message,), {
-  'DESCRIPTOR' : _COLLECTIONPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CollectionParams)
-  })
-_sym_db.RegisterMessage(CollectionParams)
-
-CollectionParamsDiff = _reflection.GeneratedProtocolMessageType('CollectionParamsDiff', (_message.Message,), {
-  'DESCRIPTOR' : _COLLECTIONPARAMSDIFF,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CollectionParamsDiff)
-  })
-_sym_db.RegisterMessage(CollectionParamsDiff)
-
-CollectionConfig = _reflection.GeneratedProtocolMessageType('CollectionConfig', (_message.Message,), {
-  'DESCRIPTOR' : _COLLECTIONCONFIG,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CollectionConfig)
-  })
-_sym_db.RegisterMessage(CollectionConfig)
-
-KeywordIndexParams = _reflection.GeneratedProtocolMessageType('KeywordIndexParams', (_message.Message,), {
-  'DESCRIPTOR' : _KEYWORDINDEXPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.KeywordIndexParams)
-  })
-_sym_db.RegisterMessage(KeywordIndexParams)
-
-IntegerIndexParams = _reflection.GeneratedProtocolMessageType('IntegerIndexParams', (_message.Message,), {
-  'DESCRIPTOR' : _INTEGERINDEXPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.IntegerIndexParams)
-  })
-_sym_db.RegisterMessage(IntegerIndexParams)
-
-FloatIndexParams = _reflection.GeneratedProtocolMessageType('FloatIndexParams', (_message.Message,), {
-  'DESCRIPTOR' : _FLOATINDEXPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.FloatIndexParams)
-  })
-_sym_db.RegisterMessage(FloatIndexParams)
-
-GeoIndexParams = _reflection.GeneratedProtocolMessageType('GeoIndexParams', (_message.Message,), {
-  'DESCRIPTOR' : _GEOINDEXPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GeoIndexParams)
-  })
-_sym_db.RegisterMessage(GeoIndexParams)
-
-StopwordsSet = _reflection.GeneratedProtocolMessageType('StopwordsSet', (_message.Message,), {
-  'DESCRIPTOR' : _STOPWORDSSET,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.StopwordsSet)
-  })
-_sym_db.RegisterMessage(StopwordsSet)
-
-TextIndexParams = _reflection.GeneratedProtocolMessageType('TextIndexParams', (_message.Message,), {
-  'DESCRIPTOR' : _TEXTINDEXPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.TextIndexParams)
-  })
-_sym_db.RegisterMessage(TextIndexParams)
-
-StemmingAlgorithm = _reflection.GeneratedProtocolMessageType('StemmingAlgorithm', (_message.Message,), {
-  'DESCRIPTOR' : _STEMMINGALGORITHM,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.StemmingAlgorithm)
-  })
-_sym_db.RegisterMessage(StemmingAlgorithm)
-
-SnowballParams = _reflection.GeneratedProtocolMessageType('SnowballParams', (_message.Message,), {
-  'DESCRIPTOR' : _SNOWBALLPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SnowballParams)
-  })
-_sym_db.RegisterMessage(SnowballParams)
-
-BoolIndexParams = _reflection.GeneratedProtocolMessageType('BoolIndexParams', (_message.Message,), {
-  'DESCRIPTOR' : _BOOLINDEXPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.BoolIndexParams)
-  })
-_sym_db.RegisterMessage(BoolIndexParams)
-
-DatetimeIndexParams = _reflection.GeneratedProtocolMessageType('DatetimeIndexParams', (_message.Message,), {
-  'DESCRIPTOR' : _DATETIMEINDEXPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DatetimeIndexParams)
-  })
-_sym_db.RegisterMessage(DatetimeIndexParams)
-
-UuidIndexParams = _reflection.GeneratedProtocolMessageType('UuidIndexParams', (_message.Message,), {
-  'DESCRIPTOR' : _UUIDINDEXPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.UuidIndexParams)
-  })
-_sym_db.RegisterMessage(UuidIndexParams)
-
-PayloadIndexParams = _reflection.GeneratedProtocolMessageType('PayloadIndexParams', (_message.Message,), {
-  'DESCRIPTOR' : _PAYLOADINDEXPARAMS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PayloadIndexParams)
-  })
-_sym_db.RegisterMessage(PayloadIndexParams)
-
-PayloadSchemaInfo = _reflection.GeneratedProtocolMessageType('PayloadSchemaInfo', (_message.Message,), {
-  'DESCRIPTOR' : _PAYLOADSCHEMAINFO,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PayloadSchemaInfo)
-  })
-_sym_db.RegisterMessage(PayloadSchemaInfo)
-
-CollectionInfo = _reflection.GeneratedProtocolMessageType('CollectionInfo', (_message.Message,), {
-
-  'PayloadSchemaEntry' : _reflection.GeneratedProtocolMessageType('PayloadSchemaEntry', (_message.Message,), {
-    'DESCRIPTOR' : _COLLECTIONINFO_PAYLOADSCHEMAENTRY,
-    '__module__' : 'collections_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.CollectionInfo.PayloadSchemaEntry)
-    })
-  ,
-  'DESCRIPTOR' : _COLLECTIONINFO,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CollectionInfo)
-  })
-_sym_db.RegisterMessage(CollectionInfo)
-_sym_db.RegisterMessage(CollectionInfo.PayloadSchemaEntry)
-
-ChangeAliases = _reflection.GeneratedProtocolMessageType('ChangeAliases', (_message.Message,), {
-  'DESCRIPTOR' : _CHANGEALIASES,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ChangeAliases)
-  })
-_sym_db.RegisterMessage(ChangeAliases)
-
-AliasOperations = _reflection.GeneratedProtocolMessageType('AliasOperations', (_message.Message,), {
-  'DESCRIPTOR' : _ALIASOPERATIONS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.AliasOperations)
-  })
-_sym_db.RegisterMessage(AliasOperations)
-
-CreateAlias = _reflection.GeneratedProtocolMessageType('CreateAlias', (_message.Message,), {
-  'DESCRIPTOR' : _CREATEALIAS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CreateAlias)
-  })
-_sym_db.RegisterMessage(CreateAlias)
-
-RenameAlias = _reflection.GeneratedProtocolMessageType('RenameAlias', (_message.Message,), {
-  'DESCRIPTOR' : _RENAMEALIAS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RenameAlias)
-  })
-_sym_db.RegisterMessage(RenameAlias)
-
-DeleteAlias = _reflection.GeneratedProtocolMessageType('DeleteAlias', (_message.Message,), {
-  'DESCRIPTOR' : _DELETEALIAS,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DeleteAlias)
-  })
-_sym_db.RegisterMessage(DeleteAlias)
-
-ListAliasesRequest = _reflection.GeneratedProtocolMessageType('ListAliasesRequest', (_message.Message,), {
-  'DESCRIPTOR' : _LISTALIASESREQUEST,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ListAliasesRequest)
-  })
-_sym_db.RegisterMessage(ListAliasesRequest)
-
-ListCollectionAliasesRequest = _reflection.GeneratedProtocolMessageType('ListCollectionAliasesRequest', (_message.Message,), {
-  'DESCRIPTOR' : _LISTCOLLECTIONALIASESREQUEST,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ListCollectionAliasesRequest)
-  })
-_sym_db.RegisterMessage(ListCollectionAliasesRequest)
-
-AliasDescription = _reflection.GeneratedProtocolMessageType('AliasDescription', (_message.Message,), {
-  'DESCRIPTOR' : _ALIASDESCRIPTION,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.AliasDescription)
-  })
-_sym_db.RegisterMessage(AliasDescription)
-
-ListAliasesResponse = _reflection.GeneratedProtocolMessageType('ListAliasesResponse', (_message.Message,), {
-  'DESCRIPTOR' : _LISTALIASESRESPONSE,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ListAliasesResponse)
-  })
-_sym_db.RegisterMessage(ListAliasesResponse)
-
-CollectionClusterInfoRequest = _reflection.GeneratedProtocolMessageType('CollectionClusterInfoRequest', (_message.Message,), {
-  'DESCRIPTOR' : _COLLECTIONCLUSTERINFOREQUEST,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CollectionClusterInfoRequest)
-  })
-_sym_db.RegisterMessage(CollectionClusterInfoRequest)
-
-ShardKey = _reflection.GeneratedProtocolMessageType('ShardKey', (_message.Message,), {
-  'DESCRIPTOR' : _SHARDKEY,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ShardKey)
-  })
-_sym_db.RegisterMessage(ShardKey)
-
-LocalShardInfo = _reflection.GeneratedProtocolMessageType('LocalShardInfo', (_message.Message,), {
-  'DESCRIPTOR' : _LOCALSHARDINFO,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.LocalShardInfo)
-  })
-_sym_db.RegisterMessage(LocalShardInfo)
-
-RemoteShardInfo = _reflection.GeneratedProtocolMessageType('RemoteShardInfo', (_message.Message,), {
-  'DESCRIPTOR' : _REMOTESHARDINFO,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RemoteShardInfo)
-  })
-_sym_db.RegisterMessage(RemoteShardInfo)
-
-ShardTransferInfo = _reflection.GeneratedProtocolMessageType('ShardTransferInfo', (_message.Message,), {
-  'DESCRIPTOR' : _SHARDTRANSFERINFO,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ShardTransferInfo)
-  })
-_sym_db.RegisterMessage(ShardTransferInfo)
-
-ReshardingInfo = _reflection.GeneratedProtocolMessageType('ReshardingInfo', (_message.Message,), {
-  'DESCRIPTOR' : _RESHARDINGINFO,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ReshardingInfo)
-  })
-_sym_db.RegisterMessage(ReshardingInfo)
-
-CollectionClusterInfoResponse = _reflection.GeneratedProtocolMessageType('CollectionClusterInfoResponse', (_message.Message,), {
-  'DESCRIPTOR' : _COLLECTIONCLUSTERINFORESPONSE,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CollectionClusterInfoResponse)
-  })
-_sym_db.RegisterMessage(CollectionClusterInfoResponse)
-
-MoveShard = _reflection.GeneratedProtocolMessageType('MoveShard', (_message.Message,), {
-  'DESCRIPTOR' : _MOVESHARD,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.MoveShard)
-  })
-_sym_db.RegisterMessage(MoveShard)
-
-ReplicateShard = _reflection.GeneratedProtocolMessageType('ReplicateShard', (_message.Message,), {
-  'DESCRIPTOR' : _REPLICATESHARD,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ReplicateShard)
-  })
-_sym_db.RegisterMessage(ReplicateShard)
-
-AbortShardTransfer = _reflection.GeneratedProtocolMessageType('AbortShardTransfer', (_message.Message,), {
-  'DESCRIPTOR' : _ABORTSHARDTRANSFER,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.AbortShardTransfer)
-  })
-_sym_db.RegisterMessage(AbortShardTransfer)
-
-RestartTransfer = _reflection.GeneratedProtocolMessageType('RestartTransfer', (_message.Message,), {
-  'DESCRIPTOR' : _RESTARTTRANSFER,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RestartTransfer)
-  })
-_sym_db.RegisterMessage(RestartTransfer)
-
-Replica = _reflection.GeneratedProtocolMessageType('Replica', (_message.Message,), {
-  'DESCRIPTOR' : _REPLICA,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Replica)
-  })
-_sym_db.RegisterMessage(Replica)
-
-CreateShardKey = _reflection.GeneratedProtocolMessageType('CreateShardKey', (_message.Message,), {
-  'DESCRIPTOR' : _CREATESHARDKEY,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CreateShardKey)
-  })
-_sym_db.RegisterMessage(CreateShardKey)
-
-DeleteShardKey = _reflection.GeneratedProtocolMessageType('DeleteShardKey', (_message.Message,), {
-  'DESCRIPTOR' : _DELETESHARDKEY,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DeleteShardKey)
-  })
-_sym_db.RegisterMessage(DeleteShardKey)
-
-UpdateCollectionClusterSetupRequest = _reflection.GeneratedProtocolMessageType('UpdateCollectionClusterSetupRequest', (_message.Message,), {
-  'DESCRIPTOR' : _UPDATECOLLECTIONCLUSTERSETUPREQUEST,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.UpdateCollectionClusterSetupRequest)
-  })
-_sym_db.RegisterMessage(UpdateCollectionClusterSetupRequest)
-
-UpdateCollectionClusterSetupResponse = _reflection.GeneratedProtocolMessageType('UpdateCollectionClusterSetupResponse', (_message.Message,), {
-  'DESCRIPTOR' : _UPDATECOLLECTIONCLUSTERSETUPRESPONSE,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.UpdateCollectionClusterSetupResponse)
-  })
-_sym_db.RegisterMessage(UpdateCollectionClusterSetupResponse)
-
-CreateShardKeyRequest = _reflection.GeneratedProtocolMessageType('CreateShardKeyRequest', (_message.Message,), {
-  'DESCRIPTOR' : _CREATESHARDKEYREQUEST,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CreateShardKeyRequest)
-  })
-_sym_db.RegisterMessage(CreateShardKeyRequest)
-
-DeleteShardKeyRequest = _reflection.GeneratedProtocolMessageType('DeleteShardKeyRequest', (_message.Message,), {
-  'DESCRIPTOR' : _DELETESHARDKEYREQUEST,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DeleteShardKeyRequest)
-  })
-_sym_db.RegisterMessage(DeleteShardKeyRequest)
-
-CreateShardKeyResponse = _reflection.GeneratedProtocolMessageType('CreateShardKeyResponse', (_message.Message,), {
-  'DESCRIPTOR' : _CREATESHARDKEYRESPONSE,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CreateShardKeyResponse)
-  })
-_sym_db.RegisterMessage(CreateShardKeyResponse)
-
-DeleteShardKeyResponse = _reflection.GeneratedProtocolMessageType('DeleteShardKeyResponse', (_message.Message,), {
-  'DESCRIPTOR' : _DELETESHARDKEYRESPONSE,
-  '__module__' : 'collections_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DeleteShardKeyResponse)
-  })
-_sym_db.RegisterMessage(DeleteShardKeyResponse)
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x11\x63ollections.proto\x12\x06qdrant\x1a\x13json_with_int.proto\"\x83\x03\n\x0cVectorParams\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\"\n\x08\x64istance\x18\x02 \x01(\x0e\x32\x10.qdrant.Distance\x12\x30\n\x0bhnsw_config\x18\x03 \x01(\x0b\x32\x16.qdrant.HnswConfigDiffH\x00\x88\x01\x01\x12<\n\x13quantization_config\x18\x04 \x01(\x0b\x32\x1a.qdrant.QuantizationConfigH\x01\x88\x01\x01\x12\x14\n\x07on_disk\x18\x05 \x01(\x08H\x02\x88\x01\x01\x12\'\n\x08\x64\x61tatype\x18\x06 \x01(\x0e\x32\x10.qdrant.DatatypeH\x03\x88\x01\x01\x12:\n\x12multivector_config\x18\x07 \x01(\x0b\x32\x19.qdrant.MultiVectorConfigH\x04\x88\x01\x01\x42\x0e\n\x0c_hnsw_configB\x16\n\x14_quantization_configB\n\n\x08_on_diskB\x0b\n\t_datatypeB\x15\n\x13_multivector_config\"\xd0\x01\n\x10VectorParamsDiff\x12\x30\n\x0bhnsw_config\x18\x01 \x01(\x0b\x32\x16.qdrant.HnswConfigDiffH\x00\x88\x01\x01\x12@\n\x13quantization_config\x18\x02 \x01(\x0b\x32\x1e.qdrant.QuantizationConfigDiffH\x01\x88\x01\x01\x12\x14\n\x07on_disk\x18\x03 \x01(\x08H\x02\x88\x01\x01\x42\x0e\n\x0c_hnsw_configB\x16\n\x14_quantization_configB\n\n\x08_on_disk\"\x82\x01\n\x0fVectorParamsMap\x12-\n\x03map\x18\x01 \x03(\x0b\x32 
.qdrant.VectorParamsMap.MapEntry\x1a@\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.qdrant.VectorParams:\x02\x38\x01\"\x8e\x01\n\x13VectorParamsDiffMap\x12\x31\n\x03map\x18\x01 \x03(\x0b\x32$.qdrant.VectorParamsDiffMap.MapEntry\x1a\x44\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.qdrant.VectorParamsDiff:\x02\x38\x01\"p\n\rVectorsConfig\x12&\n\x06params\x18\x01 \x01(\x0b\x32\x14.qdrant.VectorParamsH\x00\x12-\n\nparams_map\x18\x02 \x01(\x0b\x32\x17.qdrant.VectorParamsMapH\x00\x42\x08\n\x06\x63onfig\"|\n\x11VectorsConfigDiff\x12*\n\x06params\x18\x01 \x01(\x0b\x32\x18.qdrant.VectorParamsDiffH\x00\x12\x31\n\nparams_map\x18\x02 \x01(\x0b\x32\x1b.qdrant.VectorParamsDiffMapH\x00\x42\x08\n\x06\x63onfig\"\x83\x01\n\x12SparseVectorParams\x12-\n\x05index\x18\x01 \x01(\x0b\x32\x19.qdrant.SparseIndexConfigH\x00\x88\x01\x01\x12\'\n\x08modifier\x18\x02 \x01(\x0e\x32\x10.qdrant.ModifierH\x01\x88\x01\x01\x42\x08\n\x06_indexB\x0b\n\t_modifier\"\x8e\x01\n\x12SparseVectorConfig\x12\x30\n\x03map\x18\x01 \x03(\x0b\x32#.qdrant.SparseVectorConfig.MapEntry\x1a\x46\n\x08MapEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.qdrant.SparseVectorParams:\x02\x38\x01\"F\n\x11MultiVectorConfig\x12\x31\n\ncomparator\x18\x01 \x01(\x0e\x32\x1d.qdrant.MultiVectorComparator\"3\n\x18GetCollectionInfoRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\"2\n\x17\x43ollectionExistsRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\"\"\n\x10\x43ollectionExists\x12\x0e\n\x06\x65xists\x18\x01 \x01(\x08\"R\n\x18\x43ollectionExistsResponse\x12(\n\x06result\x18\x01 \x01(\x0b\x32\x18.qdrant.CollectionExists\x12\x0c\n\x04time\x18\x02 \x01(\x01\"\x18\n\x16ListCollectionsRequest\"%\n\x15\x43ollectionDescription\x12\x0c\n\x04name\x18\x01 \x01(\t\"Q\n\x19GetCollectionInfoResponse\x12&\n\x06result\x18\x01 \x01(\x0b\x32\x16.qdrant.CollectionInfo\x12\x0c\n\x04time\x18\x02 \x01(\x01\"[\n\x17ListCollectionsResponse\x12\x32\n\x0b\x63ollections\x18\x01 \x03(\x0b\x32\x1d.qdrant.CollectionDescription\x12\x0c\n\x04time\x18\x02 \x01(\x01\"\x84\x01\n\x16MaxOptimizationThreads\x12\x0f\n\x05value\x18\x01 \x01(\x04H\x00\x12\x39\n\x07setting\x18\x02 \x01(\x0e\x32&.qdrant.MaxOptimizationThreads.SettingH\x00\"\x13\n\x07Setting\x12\x08\n\x04\x41uto\x10\x00\x42\t\n\x07variant\",\n\x0fOptimizerStatus\x12\n\n\x02ok\x18\x01 \x01(\x08\x12\r\n\x05\x65rror\x18\x02 \x01(\t\"$\n\x11\x43ollectionWarning\x12\x0f\n\x07message\x18\x01 \x01(\t\"\xc0\x02\n\x0eHnswConfigDiff\x12\x0e\n\x01m\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x19\n\x0c\x65\x66_construct\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12 \n\x13\x66ull_scan_threshold\x18\x03 \x01(\x04H\x02\x88\x01\x01\x12!\n\x14max_indexing_threads\x18\x04 \x01(\x04H\x03\x88\x01\x01\x12\x14\n\x07on_disk\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x16\n\tpayload_m\x18\x06 \x01(\x04H\x05\x88\x01\x01\x12\x1b\n\x0einline_storage\x18\x07 \x01(\x08H\x06\x88\x01\x01\x42\x04\n\x02_mB\x0f\n\r_ef_constructB\x16\n\x14_full_scan_thresholdB\x17\n\x15_max_indexing_threadsB\n\n\x08_on_diskB\x0c\n\n_payload_mB\x11\n\x0f_inline_storage\"\xa5\x01\n\x11SparseIndexConfig\x12 \n\x13\x66ull_scan_threshold\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x14\n\x07on_disk\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\'\n\x08\x64\x61tatype\x18\x03 \x01(\x0e\x32\x10.qdrant.DatatypeH\x02\x88\x01\x01\x42\x16\n\x14_full_scan_thresholdB\n\n\x08_on_diskB\x0b\n\t_datatype\"\xaf\x01\n\rWalConfigDiff\x12\x1c\n\x0fwal_capacity_mb\x18\x01 
\x01(\x04H\x00\x88\x01\x01\x12\x1f\n\x12wal_segments_ahead\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\x1e\n\x11wal_retain_closed\x18\x03 \x01(\x04H\x02\x88\x01\x01\x42\x12\n\x10_wal_capacity_mbB\x15\n\x13_wal_segments_aheadB\x14\n\x12_wal_retain_closed\"\xe6\x04\n\x14OptimizersConfigDiff\x12\x1e\n\x11\x64\x65leted_threshold\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12%\n\x18vacuum_min_vector_number\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12#\n\x16\x64\x65\x66\x61ult_segment_number\x18\x03 \x01(\x04H\x02\x88\x01\x01\x12\x1d\n\x10max_segment_size\x18\x04 \x01(\x04H\x03\x88\x01\x01\x12\x1d\n\x10memmap_threshold\x18\x05 \x01(\x04H\x04\x88\x01\x01\x12\x1f\n\x12indexing_threshold\x18\x06 \x01(\x04H\x05\x88\x01\x01\x12\x1f\n\x12\x66lush_interval_sec\x18\x07 \x01(\x04H\x06\x88\x01\x01\x12\x30\n#deprecated_max_optimization_threads\x18\x08 \x01(\x04H\x07\x88\x01\x01\x12\x45\n\x18max_optimization_threads\x18\t \x01(\x0b\x32\x1e.qdrant.MaxOptimizationThreadsH\x08\x88\x01\x01\x42\x14\n\x12_deleted_thresholdB\x1b\n\x19_vacuum_min_vector_numberB\x19\n\x17_default_segment_numberB\x13\n\x11_max_segment_sizeB\x13\n\x11_memmap_thresholdB\x15\n\x13_indexing_thresholdB\x15\n\x13_flush_interval_secB&\n$_deprecated_max_optimization_threadsB\x1b\n\x19_max_optimization_threads\"\x88\x01\n\x12ScalarQuantization\x12&\n\x04type\x18\x01 \x01(\x0e\x32\x18.qdrant.QuantizationType\x12\x15\n\x08quantile\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x17\n\nalways_ram\x18\x03 \x01(\x08H\x01\x88\x01\x01\x42\x0b\n\t_quantileB\r\n\x0b_always_ram\"l\n\x13ProductQuantization\x12-\n\x0b\x63ompression\x18\x01 \x01(\x0e\x32\x18.qdrant.CompressionRatio\x12\x17\n\nalways_ram\x18\x02 \x01(\x08H\x00\x88\x01\x01\x42\r\n\x0b_always_ram\"\xb6\x01\n\x1f\x42inaryQuantizationQueryEncoding\x12\x42\n\x07setting\x18\x04 \x01(\x0e\x32/.qdrant.BinaryQuantizationQueryEncoding.SettingH\x00\"D\n\x07Setting\x12\x0b\n\x07\x44\x65\x66\x61ult\x10\x00\x12\n\n\x06\x42inary\x10\x01\x12\x0f\n\x0bScalar4Bits\x10\x02\x12\x0f\n\x0bScalar8Bits\x10\x03\x42\t\n\x07variant\"\xdd\x01\n\x12\x42inaryQuantization\x12\x17\n\nalways_ram\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x39\n\x08\x65ncoding\x18\x02 \x01(\x0e\x32\".qdrant.BinaryQuantizationEncodingH\x01\x88\x01\x01\x12\x44\n\x0equery_encoding\x18\x03 \x01(\x0b\x32\'.qdrant.BinaryQuantizationQueryEncodingH\x02\x88\x01\x01\x42\r\n\x0b_always_ramB\x0b\n\t_encodingB\x11\n\x0f_query_encoding\"\xb0\x01\n\x12QuantizationConfig\x12,\n\x06scalar\x18\x01 \x01(\x0b\x32\x1a.qdrant.ScalarQuantizationH\x00\x12.\n\x07product\x18\x02 \x01(\x0b\x32\x1b.qdrant.ProductQuantizationH\x00\x12,\n\x06\x62inary\x18\x03 \x01(\x0b\x32\x1a.qdrant.BinaryQuantizationH\x00\x42\x0e\n\x0cquantization\"\n\n\x08\x44isabled\"\xda\x01\n\x16QuantizationConfigDiff\x12,\n\x06scalar\x18\x01 \x01(\x0b\x32\x1a.qdrant.ScalarQuantizationH\x00\x12.\n\x07product\x18\x02 \x01(\x0b\x32\x1b.qdrant.ProductQuantizationH\x00\x12$\n\x08\x64isabled\x18\x03 \x01(\x0b\x32\x10.qdrant.DisabledH\x00\x12,\n\x06\x62inary\x18\x04 \x01(\x0b\x32\x1a.qdrant.BinaryQuantizationH\x00\x42\x0e\n\x0cquantization\"\xb9\t\n\x10StrictModeConfig\x12\x14\n\x07\x65nabled\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x1c\n\x0fmax_query_limit\x18\x02 \x01(\rH\x01\x88\x01\x01\x12\x18\n\x0bmax_timeout\x18\x03 \x01(\rH\x02\x88\x01\x01\x12)\n\x1cunindexed_filtering_retrieve\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12\'\n\x1aunindexed_filtering_update\x18\x05 \x01(\x08H\x04\x88\x01\x01\x12\x1f\n\x12search_max_hnsw_ef\x18\x06 \x01(\rH\x05\x88\x01\x01\x12\x1f\n\x12search_allow_exact\x18\x07 
\x01(\x08H\x06\x88\x01\x01\x12$\n\x17search_max_oversampling\x18\x08 \x01(\x02H\x07\x88\x01\x01\x12!\n\x14upsert_max_batchsize\x18\t \x01(\x04H\x08\x88\x01\x01\x12-\n max_collection_vector_size_bytes\x18\n \x01(\x04H\t\x88\x01\x01\x12\x1c\n\x0fread_rate_limit\x18\x0b \x01(\rH\n\x88\x01\x01\x12\x1d\n\x10write_rate_limit\x18\x0c \x01(\rH\x0b\x88\x01\x01\x12.\n!max_collection_payload_size_bytes\x18\r \x01(\x04H\x0c\x88\x01\x01\x12\"\n\x15\x66ilter_max_conditions\x18\x0e \x01(\x04H\r\x88\x01\x01\x12\x1f\n\x12\x63ondition_max_size\x18\x0f \x01(\x04H\x0e\x88\x01\x01\x12\x44\n\x12multivector_config\x18\x10 \x01(\x0b\x32#.qdrant.StrictModeMultivectorConfigH\x0f\x88\x01\x01\x12:\n\rsparse_config\x18\x11 \x01(\x0b\x32\x1e.qdrant.StrictModeSparseConfigH\x10\x88\x01\x01\x12\x1d\n\x10max_points_count\x18\x12 \x01(\x04H\x11\x88\x01\x01\x12$\n\x17max_payload_index_count\x18\x13 \x01(\x04H\x12\x88\x01\x01\x42\n\n\x08_enabledB\x12\n\x10_max_query_limitB\x0e\n\x0c_max_timeoutB\x1f\n\x1d_unindexed_filtering_retrieveB\x1d\n\x1b_unindexed_filtering_updateB\x15\n\x13_search_max_hnsw_efB\x15\n\x13_search_allow_exactB\x1a\n\x18_search_max_oversamplingB\x17\n\x15_upsert_max_batchsizeB#\n!_max_collection_vector_size_bytesB\x12\n\x10_read_rate_limitB\x13\n\x11_write_rate_limitB$\n\"_max_collection_payload_size_bytesB\x18\n\x16_filter_max_conditionsB\x15\n\x13_condition_max_sizeB\x15\n\x13_multivector_configB\x10\n\x0e_sparse_configB\x13\n\x11_max_points_countB\x1a\n\x18_max_payload_index_count\"\xb0\x01\n\x16StrictModeSparseConfig\x12G\n\rsparse_config\x18\x01 \x03(\x0b\x32\x30.qdrant.StrictModeSparseConfig.SparseConfigEntry\x1aM\n\x11SparseConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.qdrant.StrictModeSparse:\x02\x38\x01\":\n\x10StrictModeSparse\x12\x17\n\nmax_length\x18\n \x01(\x04H\x00\x88\x01\x01\x42\r\n\x0b_max_length\"\xce\x01\n\x1bStrictModeMultivectorConfig\x12V\n\x12multivector_config\x18\x01 \x03(\x0b\x32:.qdrant.StrictModeMultivectorConfig.MultivectorConfigEntry\x1aW\n\x16MultivectorConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12,\n\x05value\x18\x02 \x01(\x0b\x32\x1d.qdrant.StrictModeMultivector:\x02\x38\x01\"A\n\x15StrictModeMultivector\x12\x18\n\x0bmax_vectors\x18\x01 \x01(\x04H\x00\x88\x01\x01\x42\x0e\n\x0c_max_vectors\"\x9b\x08\n\x10\x43reateCollection\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x30\n\x0bhnsw_config\x18\x04 \x01(\x0b\x32\x16.qdrant.HnswConfigDiffH\x00\x88\x01\x01\x12.\n\nwal_config\x18\x05 \x01(\x0b\x32\x15.qdrant.WalConfigDiffH\x01\x88\x01\x01\x12<\n\x11optimizers_config\x18\x06 \x01(\x0b\x32\x1c.qdrant.OptimizersConfigDiffH\x02\x88\x01\x01\x12\x19\n\x0cshard_number\x18\x07 \x01(\rH\x03\x88\x01\x01\x12\x1c\n\x0fon_disk_payload\x18\x08 \x01(\x08H\x04\x88\x01\x01\x12\x14\n\x07timeout\x18\t \x01(\x04H\x05\x88\x01\x01\x12\x32\n\x0evectors_config\x18\n \x01(\x0b\x32\x15.qdrant.VectorsConfigH\x06\x88\x01\x01\x12\x1f\n\x12replication_factor\x18\x0b \x01(\rH\x07\x88\x01\x01\x12%\n\x18write_consistency_factor\x18\x0c \x01(\rH\x08\x88\x01\x01\x12<\n\x13quantization_config\x18\x0e \x01(\x0b\x32\x1a.qdrant.QuantizationConfigH\t\x88\x01\x01\x12\x34\n\x0fsharding_method\x18\x0f \x01(\x0e\x32\x16.qdrant.ShardingMethodH\n\x88\x01\x01\x12>\n\x15sparse_vectors_config\x18\x10 \x01(\x0b\x32\x1a.qdrant.SparseVectorConfigH\x0b\x88\x01\x01\x12\x39\n\x12strict_mode_config\x18\x11 \x01(\x0b\x32\x18.qdrant.StrictModeConfigH\x0c\x88\x01\x01\x12\x38\n\x08metadata\x18\x12 
\x03(\x0b\x32&.qdrant.CreateCollection.MetadataEntry\x1a>\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\x0e\n\x0c_hnsw_configB\r\n\x0b_wal_configB\x14\n\x12_optimizers_configB\x0f\n\r_shard_numberB\x12\n\x10_on_disk_payloadB\n\n\x08_timeoutB\x11\n\x0f_vectors_configB\x15\n\x13_replication_factorB\x1b\n\x19_write_consistency_factorB\x16\n\x14_quantization_configB\x12\n\x10_sharding_methodB\x18\n\x16_sparse_vectors_configB\x15\n\x13_strict_mode_configJ\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\r\x10\x0e\"\xec\x05\n\x10UpdateCollection\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12<\n\x11optimizers_config\x18\x02 \x01(\x0b\x32\x1c.qdrant.OptimizersConfigDiffH\x00\x88\x01\x01\x12\x14\n\x07timeout\x18\x03 \x01(\x04H\x01\x88\x01\x01\x12\x31\n\x06params\x18\x04 \x01(\x0b\x32\x1c.qdrant.CollectionParamsDiffH\x02\x88\x01\x01\x12\x30\n\x0bhnsw_config\x18\x05 \x01(\x0b\x32\x16.qdrant.HnswConfigDiffH\x03\x88\x01\x01\x12\x36\n\x0evectors_config\x18\x06 \x01(\x0b\x32\x19.qdrant.VectorsConfigDiffH\x04\x88\x01\x01\x12@\n\x13quantization_config\x18\x07 \x01(\x0b\x32\x1e.qdrant.QuantizationConfigDiffH\x05\x88\x01\x01\x12>\n\x15sparse_vectors_config\x18\x08 \x01(\x0b\x32\x1a.qdrant.SparseVectorConfigH\x06\x88\x01\x01\x12\x39\n\x12strict_mode_config\x18\t \x01(\x0b\x32\x18.qdrant.StrictModeConfigH\x07\x88\x01\x01\x12\x38\n\x08metadata\x18\n \x03(\x0b\x32&.qdrant.UpdateCollection.MetadataEntry\x1a>\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\x14\n\x12_optimizers_configB\n\n\x08_timeoutB\t\n\x07_paramsB\x0e\n\x0c_hnsw_configB\x11\n\x0f_vectors_configB\x16\n\x14_quantization_configB\x18\n\x16_sparse_vectors_configB\x15\n\x13_strict_mode_config\"M\n\x10\x44\x65leteCollection\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x14\n\x07timeout\x18\x02 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_timeout\";\n\x1b\x43ollectionOperationResponse\x12\x0e\n\x06result\x18\x01 \x01(\x08\x12\x0c\n\x04time\x18\x02 \x01(\x01\"\xee\x03\n\x10\x43ollectionParams\x12\x14\n\x0cshard_number\x18\x03 \x01(\r\x12\x17\n\x0fon_disk_payload\x18\x04 \x01(\x08\x12\x32\n\x0evectors_config\x18\x05 \x01(\x0b\x32\x15.qdrant.VectorsConfigH\x00\x88\x01\x01\x12\x1f\n\x12replication_factor\x18\x06 \x01(\rH\x01\x88\x01\x01\x12%\n\x18write_consistency_factor\x18\x07 \x01(\rH\x02\x88\x01\x01\x12 \n\x13read_fan_out_factor\x18\x08 \x01(\rH\x03\x88\x01\x01\x12\x34\n\x0fsharding_method\x18\t \x01(\x0e\x32\x16.qdrant.ShardingMethodH\x04\x88\x01\x01\x12>\n\x15sparse_vectors_config\x18\n \x01(\x0b\x32\x1a.qdrant.SparseVectorConfigH\x05\x88\x01\x01\x42\x11\n\x0f_vectors_configB\x15\n\x13_replication_factorB\x1b\n\x19_write_consistency_factorB\x16\n\x14_read_fan_out_factorB\x12\n\x10_sharding_methodB\x18\n\x16_sparse_vectors_configJ\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03\"\xfe\x01\n\x14\x43ollectionParamsDiff\x12\x1f\n\x12replication_factor\x18\x01 \x01(\rH\x00\x88\x01\x01\x12%\n\x18write_consistency_factor\x18\x02 \x01(\rH\x01\x88\x01\x01\x12\x1c\n\x0fon_disk_payload\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12 \n\x13read_fan_out_factor\x18\x04 \x01(\rH\x03\x88\x01\x01\x42\x15\n\x13_replication_factorB\x1b\n\x19_write_consistency_factorB\x12\n\x10_on_disk_payloadB\x16\n\x14_read_fan_out_factor\"\xee\x03\n\x10\x43ollectionConfig\x12(\n\x06params\x18\x01 \x01(\x0b\x32\x18.qdrant.CollectionParams\x12+\n\x0bhnsw_config\x18\x02 \x01(\x0b\x32\x16.qdrant.HnswConfigDiff\x12\x36\n\x10optimizer_config\x18\x03 
\x01(\x0b\x32\x1c.qdrant.OptimizersConfigDiff\x12)\n\nwal_config\x18\x04 \x01(\x0b\x32\x15.qdrant.WalConfigDiff\x12<\n\x13quantization_config\x18\x05 \x01(\x0b\x32\x1a.qdrant.QuantizationConfigH\x00\x88\x01\x01\x12\x39\n\x12strict_mode_config\x18\x06 \x01(\x0b\x32\x18.qdrant.StrictModeConfigH\x01\x88\x01\x01\x12\x38\n\x08metadata\x18\x07 \x03(\x0b\x32&.qdrant.CollectionConfig.MetadataEntry\x1a>\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\x16\n\x14_quantization_configB\x15\n\x13_strict_mode_config\"\\\n\x12KeywordIndexParams\x12\x16\n\tis_tenant\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x14\n\x07on_disk\x18\x02 \x01(\x08H\x01\x88\x01\x01\x42\x0c\n\n_is_tenantB\n\n\x08_on_disk\"\xa0\x01\n\x12IntegerIndexParams\x12\x13\n\x06lookup\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x12\n\x05range\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x19\n\x0cis_principal\x18\x03 \x01(\x08H\x02\x88\x01\x01\x12\x14\n\x07on_disk\x18\x04 \x01(\x08H\x03\x88\x01\x01\x42\t\n\x07_lookupB\x08\n\x06_rangeB\x0f\n\r_is_principalB\n\n\x08_on_disk\"`\n\x10\x46loatIndexParams\x12\x14\n\x07on_disk\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x19\n\x0cis_principal\x18\x02 \x01(\x08H\x01\x88\x01\x01\x42\n\n\x08_on_diskB\x0f\n\r_is_principal\"2\n\x0eGeoIndexParams\x12\x14\n\x07on_disk\x18\x01 \x01(\x08H\x00\x88\x01\x01\x42\n\n\x08_on_disk\"1\n\x0cStopwordsSet\x12\x11\n\tlanguages\x18\x01 \x03(\t\x12\x0e\n\x06\x63ustom\x18\x02 \x03(\t\"\xb8\x03\n\x0fTextIndexParams\x12(\n\ttokenizer\x18\x01 \x01(\x0e\x32\x15.qdrant.TokenizerType\x12\x16\n\tlowercase\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x1a\n\rmin_token_len\x18\x03 \x01(\x04H\x01\x88\x01\x01\x12\x1a\n\rmax_token_len\x18\x04 \x01(\x04H\x02\x88\x01\x01\x12\x14\n\x07on_disk\x18\x05 \x01(\x08H\x03\x88\x01\x01\x12,\n\tstopwords\x18\x06 \x01(\x0b\x32\x14.qdrant.StopwordsSetH\x04\x88\x01\x01\x12\x1c\n\x0fphrase_matching\x18\x07 \x01(\x08H\x05\x88\x01\x01\x12/\n\x07stemmer\x18\x08 \x01(\x0b\x32\x19.qdrant.StemmingAlgorithmH\x06\x88\x01\x01\x12\x1a\n\rascii_folding\x18\t \x01(\x08H\x07\x88\x01\x01\x42\x0c\n\n_lowercaseB\x10\n\x0e_min_token_lenB\x10\n\x0e_max_token_lenB\n\n\x08_on_diskB\x0c\n\n_stopwordsB\x12\n\x10_phrase_matchingB\n\n\x08_stemmerB\x10\n\x0e_ascii_folding\"R\n\x11StemmingAlgorithm\x12*\n\x08snowball\x18\x01 \x01(\x0b\x32\x16.qdrant.SnowballParamsH\x00\x42\x11\n\x0fstemming_params\"\"\n\x0eSnowballParams\x12\x10\n\x08language\x18\x01 \x01(\t\"3\n\x0f\x42oolIndexParams\x12\x14\n\x07on_disk\x18\x01 \x01(\x08H\x00\x88\x01\x01\x42\n\n\x08_on_disk\"c\n\x13\x44\x61tetimeIndexParams\x12\x14\n\x07on_disk\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x19\n\x0cis_principal\x18\x02 \x01(\x08H\x01\x88\x01\x01\x42\n\n\x08_on_diskB\x0f\n\r_is_principal\"Y\n\x0fUuidIndexParams\x12\x16\n\tis_tenant\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x14\n\x07on_disk\x18\x02 \x01(\x08H\x01\x88\x01\x01\x42\x0c\n\n_is_tenantB\n\n\x08_on_disk\"\xe8\x03\n\x12PayloadIndexParams\x12:\n\x14keyword_index_params\x18\x03 \x01(\x0b\x32\x1a.qdrant.KeywordIndexParamsH\x00\x12:\n\x14integer_index_params\x18\x02 \x01(\x0b\x32\x1a.qdrant.IntegerIndexParamsH\x00\x12\x36\n\x12\x66loat_index_params\x18\x04 \x01(\x0b\x32\x18.qdrant.FloatIndexParamsH\x00\x12\x32\n\x10geo_index_params\x18\x05 \x01(\x0b\x32\x16.qdrant.GeoIndexParamsH\x00\x12\x34\n\x11text_index_params\x18\x01 \x01(\x0b\x32\x17.qdrant.TextIndexParamsH\x00\x12\x34\n\x11\x62ool_index_params\x18\x06 \x01(\x0b\x32\x17.qdrant.BoolIndexParamsH\x00\x12<\n\x15\x64\x61tetime_index_params\x18\x07 
\x01(\x0b\x32\x1b.qdrant.DatetimeIndexParamsH\x00\x12\x34\n\x11uuid_index_params\x18\x08 \x01(\x0b\x32\x17.qdrant.UuidIndexParamsH\x00\x42\x0e\n\x0cindex_params\"\x9d\x01\n\x11PayloadSchemaInfo\x12,\n\tdata_type\x18\x01 \x01(\x0e\x32\x19.qdrant.PayloadSchemaType\x12/\n\x06params\x18\x02 \x01(\x0b\x32\x1a.qdrant.PayloadIndexParamsH\x00\x88\x01\x01\x12\x13\n\x06points\x18\x03 \x01(\x04H\x01\x88\x01\x01\x42\t\n\x07_paramsB\t\n\x07_points\"\xec\x03\n\x0e\x43ollectionInfo\x12(\n\x06status\x18\x01 \x01(\x0e\x32\x18.qdrant.CollectionStatus\x12\x31\n\x10optimizer_status\x18\x02 \x01(\x0b\x32\x17.qdrant.OptimizerStatus\x12\x16\n\x0esegments_count\x18\x04 \x01(\x04\x12(\n\x06\x63onfig\x18\x07 \x01(\x0b\x32\x18.qdrant.CollectionConfig\x12\x41\n\x0epayload_schema\x18\x08 \x03(\x0b\x32).qdrant.CollectionInfo.PayloadSchemaEntry\x12\x19\n\x0cpoints_count\x18\t \x01(\x04H\x00\x88\x01\x01\x12\"\n\x15indexed_vectors_count\x18\n \x01(\x04H\x01\x88\x01\x01\x12+\n\x08warnings\x18\x0b \x03(\x0b\x32\x19.qdrant.CollectionWarning\x1aO\n\x12PayloadSchemaEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.qdrant.PayloadSchemaInfo:\x02\x38\x01\x42\x0f\n\r_points_countB\x18\n\x16_indexed_vectors_countJ\x04\x08\x03\x10\x04J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07\"[\n\rChangeAliases\x12(\n\x07\x61\x63tions\x18\x01 \x03(\x0b\x32\x17.qdrant.AliasOperations\x12\x14\n\x07timeout\x18\x02 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_timeout\"\xa2\x01\n\x0f\x41liasOperations\x12+\n\x0c\x63reate_alias\x18\x01 \x01(\x0b\x32\x13.qdrant.CreateAliasH\x00\x12+\n\x0crename_alias\x18\x02 \x01(\x0b\x32\x13.qdrant.RenameAliasH\x00\x12+\n\x0c\x64\x65lete_alias\x18\x03 \x01(\x0b\x32\x13.qdrant.DeleteAliasH\x00\x42\x08\n\x06\x61\x63tion\":\n\x0b\x43reateAlias\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x12\n\nalias_name\x18\x02 \x01(\t\"=\n\x0bRenameAlias\x12\x16\n\x0eold_alias_name\x18\x01 \x01(\t\x12\x16\n\x0enew_alias_name\x18\x02 \x01(\t\"!\n\x0b\x44\x65leteAlias\x12\x12\n\nalias_name\x18\x01 \x01(\t\"\x14\n\x12ListAliasesRequest\"7\n\x1cListCollectionAliasesRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\"?\n\x10\x41liasDescription\x12\x12\n\nalias_name\x18\x01 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x02 \x01(\t\"N\n\x13ListAliasesResponse\x12)\n\x07\x61liases\x18\x01 \x03(\x0b\x32\x18.qdrant.AliasDescription\x12\x0c\n\x04time\x18\x02 \x01(\x01\"7\n\x1c\x43ollectionClusterInfoRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\"6\n\x08ShardKey\x12\x11\n\x07keyword\x18\x01 \x01(\tH\x00\x12\x10\n\x06number\x18\x02 \x01(\x04H\x00\x42\x05\n\x03key\"\x95\x01\n\x0eLocalShardInfo\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x14\n\x0cpoints_count\x18\x02 \x01(\x04\x12#\n\x05state\x18\x03 \x01(\x0e\x32\x14.qdrant.ReplicaState\x12(\n\tshard_key\x18\x04 \x01(\x0b\x32\x10.qdrant.ShardKeyH\x00\x88\x01\x01\x42\x0c\n\n_shard_key\"\x91\x01\n\x0fRemoteShardInfo\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x0f\n\x07peer_id\x18\x02 \x01(\x04\x12#\n\x05state\x18\x03 \x01(\x0e\x32\x14.qdrant.ReplicaState\x12(\n\tshard_key\x18\x04 \x01(\x0b\x32\x10.qdrant.ShardKeyH\x00\x88\x01\x01\x42\x0c\n\n_shard_key\"w\n\x11ShardTransferInfo\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x18\n\x0bto_shard_id\x18\x05 \x01(\rH\x00\x88\x01\x01\x12\x0c\n\x04\x66rom\x18\x02 \x01(\x04\x12\n\n\x02to\x18\x03 \x01(\x04\x12\x0c\n\x04sync\x18\x04 \x01(\x08\x42\x0e\n\x0c_to_shard_id\"\x9b\x01\n\x0eReshardingInfo\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x0f\n\x07peer_id\x18\x02 \x01(\x04\x12(\n\tshard_key\x18\x03 
\x01(\x0b\x32\x10.qdrant.ShardKeyH\x00\x88\x01\x01\x12.\n\tdirection\x18\x04 \x01(\x0e\x32\x1b.qdrant.ReshardingDirectionB\x0c\n\n_shard_key\"\x8e\x02\n\x1d\x43ollectionClusterInfoResponse\x12\x0f\n\x07peer_id\x18\x01 \x01(\x04\x12\x13\n\x0bshard_count\x18\x02 \x01(\x04\x12,\n\x0clocal_shards\x18\x03 \x03(\x0b\x32\x16.qdrant.LocalShardInfo\x12.\n\rremote_shards\x18\x04 \x03(\x0b\x32\x17.qdrant.RemoteShardInfo\x12\x32\n\x0fshard_transfers\x18\x05 \x03(\x0b\x32\x19.qdrant.ShardTransferInfo\x12\x35\n\x15resharding_operations\x18\x06 \x03(\x0b\x32\x16.qdrant.ReshardingInfo\"\xae\x01\n\tMoveShard\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x18\n\x0bto_shard_id\x18\x05 \x01(\rH\x00\x88\x01\x01\x12\x14\n\x0c\x66rom_peer_id\x18\x02 \x01(\x04\x12\x12\n\nto_peer_id\x18\x03 \x01(\x04\x12\x30\n\x06method\x18\x04 \x01(\x0e\x32\x1b.qdrant.ShardTransferMethodH\x01\x88\x01\x01\x42\x0e\n\x0c_to_shard_idB\t\n\x07_method\"\xb3\x01\n\x0eReplicateShard\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x18\n\x0bto_shard_id\x18\x05 \x01(\rH\x00\x88\x01\x01\x12\x14\n\x0c\x66rom_peer_id\x18\x02 \x01(\x04\x12\x12\n\nto_peer_id\x18\x03 \x01(\x04\x12\x30\n\x06method\x18\x04 \x01(\x0e\x32\x1b.qdrant.ShardTransferMethodH\x01\x88\x01\x01\x42\x0e\n\x0c_to_shard_idB\t\n\x07_method\"z\n\x12\x41\x62ortShardTransfer\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x18\n\x0bto_shard_id\x18\x04 \x01(\rH\x00\x88\x01\x01\x12\x14\n\x0c\x66rom_peer_id\x18\x02 \x01(\x04\x12\x12\n\nto_peer_id\x18\x03 \x01(\x04\x42\x0e\n\x0c_to_shard_id\"\xa4\x01\n\x0fRestartTransfer\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x18\n\x0bto_shard_id\x18\x05 \x01(\rH\x00\x88\x01\x01\x12\x14\n\x0c\x66rom_peer_id\x18\x02 \x01(\x04\x12\x12\n\nto_peer_id\x18\x03 \x01(\x04\x12+\n\x06method\x18\x04 \x01(\x0e\x32\x1b.qdrant.ShardTransferMethodB\x0e\n\x0c_to_shard_id\",\n\x07Replica\x12\x10\n\x08shard_id\x18\x01 \x01(\r\x12\x0f\n\x07peer_id\x18\x02 \x01(\x04\"\xf2\x01\n\x0e\x43reateShardKey\x12#\n\tshard_key\x18\x01 \x01(\x0b\x32\x10.qdrant.ShardKey\x12\x1a\n\rshards_number\x18\x02 \x01(\rH\x00\x88\x01\x01\x12\x1f\n\x12replication_factor\x18\x03 \x01(\rH\x01\x88\x01\x01\x12\x11\n\tplacement\x18\x04 \x03(\x04\x12\x30\n\rinitial_state\x18\x05 \x01(\x0e\x32\x14.qdrant.ReplicaStateH\x02\x88\x01\x01\x42\x10\n\x0e_shards_numberB\x15\n\x13_replication_factorB\x10\n\x0e_initial_state\"5\n\x0e\x44\x65leteShardKey\x12#\n\tshard_key\x18\x01 \x01(\x0b\x32\x10.qdrant.ShardKey\"\xc5\x03\n#UpdateCollectionClusterSetupRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\'\n\nmove_shard\x18\x02 \x01(\x0b\x32\x11.qdrant.MoveShardH\x00\x12\x31\n\x0freplicate_shard\x18\x03 \x01(\x0b\x32\x16.qdrant.ReplicateShardH\x00\x12\x34\n\x0e\x61\x62ort_transfer\x18\x04 \x01(\x0b\x32\x1a.qdrant.AbortShardTransferH\x00\x12\'\n\x0c\x64rop_replica\x18\x05 \x01(\x0b\x32\x0f.qdrant.ReplicaH\x00\x12\x32\n\x10\x63reate_shard_key\x18\x07 \x01(\x0b\x32\x16.qdrant.CreateShardKeyH\x00\x12\x32\n\x10\x64\x65lete_shard_key\x18\x08 \x01(\x0b\x32\x16.qdrant.DeleteShardKeyH\x00\x12\x33\n\x10restart_transfer\x18\t \x01(\x0b\x32\x17.qdrant.RestartTransferH\x00\x12\x14\n\x07timeout\x18\x06 \x01(\x04H\x01\x88\x01\x01\x42\x0b\n\toperationB\n\n\x08_timeout\"6\n$UpdateCollectionClusterSetupResponse\x12\x0e\n\x06result\x18\x01 \x01(\x08\"{\n\x15\x43reateShardKeyRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\'\n\x07request\x18\x02 \x01(\x0b\x32\x16.qdrant.CreateShardKey\x12\x14\n\x07timeout\x18\x03 
\x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_timeout\"{\n\x15\x44\x65leteShardKeyRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\'\n\x07request\x18\x02 \x01(\x0b\x32\x16.qdrant.DeleteShardKey\x12\x14\n\x07timeout\x18\x03 \x01(\x04H\x00\x88\x01\x01\x42\n\n\x08_timeout\"(\n\x16\x43reateShardKeyResponse\x12\x0e\n\x06result\x18\x01 \x01(\x08\"(\n\x16\x44\x65leteShardKeyResponse\x12\x0e\n\x06result\x18\x01 \x01(\x08*<\n\x08\x44\x61tatype\x12\x0b\n\x07\x44\x65\x66\x61ult\x10\x00\x12\x0b\n\x07\x46loat32\x10\x01\x12\t\n\x05Uint8\x10\x02\x12\x0b\n\x07\x46loat16\x10\x03*\x1d\n\x08Modifier\x12\x08\n\x04None\x10\x00\x12\x07\n\x03Idf\x10\x01*#\n\x15MultiVectorComparator\x12\n\n\x06MaxSim\x10\x00*O\n\x08\x44istance\x12\x13\n\x0fUnknownDistance\x10\x00\x12\n\n\x06\x43osine\x10\x01\x12\n\n\x06\x45uclid\x10\x02\x12\x07\n\x03\x44ot\x10\x03\x12\r\n\tManhattan\x10\x04*Y\n\x10\x43ollectionStatus\x12\x1b\n\x17UnknownCollectionStatus\x10\x00\x12\t\n\x05Green\x10\x01\x12\n\n\x06Yellow\x10\x02\x12\x07\n\x03Red\x10\x03\x12\x08\n\x04Grey\x10\x04*~\n\x11PayloadSchemaType\x12\x0f\n\x0bUnknownType\x10\x00\x12\x0b\n\x07Keyword\x10\x01\x12\x0b\n\x07Integer\x10\x02\x12\t\n\x05\x46loat\x10\x03\x12\x07\n\x03Geo\x10\x04\x12\x08\n\x04Text\x10\x05\x12\x08\n\x04\x42ool\x10\x06\x12\x0c\n\x08\x44\x61tetime\x10\x07\x12\x08\n\x04Uuid\x10\x08*5\n\x10QuantizationType\x12\x17\n\x13UnknownQuantization\x10\x00\x12\x08\n\x04Int8\x10\x01*=\n\x10\x43ompressionRatio\x12\x06\n\x02x4\x10\x00\x12\x06\n\x02x8\x10\x01\x12\x07\n\x03x16\x10\x02\x12\x07\n\x03x32\x10\x03\x12\x07\n\x03x64\x10\x04*I\n\x1a\x42inaryQuantizationEncoding\x12\n\n\x06OneBit\x10\x00\x12\x0b\n\x07TwoBits\x10\x01\x12\x12\n\x0eOneAndHalfBits\x10\x02*&\n\x0eShardingMethod\x12\x08\n\x04\x41uto\x10\x00\x12\n\n\x06\x43ustom\x10\x01*T\n\rTokenizerType\x12\x0b\n\x07Unknown\x10\x00\x12\n\n\x06Prefix\x10\x01\x12\x0e\n\nWhitespace\x10\x02\x12\x08\n\x04Word\x10\x03\x12\x10\n\x0cMultilingual\x10\x04*\xad\x01\n\x0cReplicaState\x12\n\n\x06\x41\x63tive\x10\x00\x12\x08\n\x04\x44\x65\x61\x64\x10\x01\x12\x0b\n\x07Partial\x10\x02\x12\x10\n\x0cInitializing\x10\x03\x12\x0c\n\x08Listener\x10\x04\x12\x13\n\x0fPartialSnapshot\x10\x05\x12\x0c\n\x08Recovery\x10\x06\x12\x0e\n\nResharding\x10\x07\x12\x17\n\x13ReshardingScaleDown\x10\x08\x12\x0e\n\nActiveRead\x10\t*\'\n\x13ReshardingDirection\x12\x06\n\x02Up\x10\x00\x12\x08\n\x04\x44own\x10\x01*a\n\x13ShardTransferMethod\x12\x11\n\rStreamRecords\x10\x00\x12\x0c\n\x08Snapshot\x10\x01\x12\x0c\n\x08WalDelta\x10\x02\x12\x1b\n\x17ReshardingStreamRecords\x10\x03\x42\x15\xaa\x02\x12Qdrant.Client.Grpcb\x06proto3')
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'collections_pb2', _globals)
 if _descriptor._USE_C_DESCRIPTORS == False:
-
-  DESCRIPTOR._options = None
-  DESCRIPTOR._serialized_options = b'\252\002\022Qdrant.Client.Grpc'
-  _VECTORPARAMSMAP_MAPENTRY._options = None
-  _VECTORPARAMSMAP_MAPENTRY._serialized_options = b'8\001'
-  _VECTORPARAMSDIFFMAP_MAPENTRY._options = None
-  _VECTORPARAMSDIFFMAP_MAPENTRY._serialized_options = b'8\001'
-  _SPARSEVECTORCONFIG_MAPENTRY._options = None
-  _SPARSEVECTORCONFIG_MAPENTRY._serialized_options = b'8\001'
-  _STRICTMODESPARSECONFIG_SPARSECONFIGENTRY._options = None
-  _STRICTMODESPARSECONFIG_SPARSECONFIGENTRY._serialized_options = b'8\001'
-  _STRICTMODEMULTIVECTORCONFIG_MULTIVECTORCONFIGENTRY._options = None
-  _STRICTMODEMULTIVECTORCONFIG_MULTIVECTORCONFIGENTRY._serialized_options = b'8\001'
-  _COLLECTIONINFO_PAYLOADSCHEMAENTRY._options = None
-  _COLLECTIONINFO_PAYLOADSCHEMAENTRY._serialized_options = b'8\001'
-  _DATATYPE._serialized_start=14684
-  _DATATYPE._serialized_end=14744
-  _MODIFIER._serialized_start=14746
-  _MODIFIER._serialized_end=14775
-  _MULTIVECTORCOMPARATOR._serialized_start=14777
-  _MULTIVECTORCOMPARATOR._serialized_end=14812
-  _DISTANCE._serialized_start=14814
-  _DISTANCE._serialized_end=14893
-  _COLLECTIONSTATUS._serialized_start=14895
-  _COLLECTIONSTATUS._serialized_end=14984
-  _PAYLOADSCHEMATYPE._serialized_start=14986
-  _PAYLOADSCHEMATYPE._serialized_end=15112
-  _QUANTIZATIONTYPE._serialized_start=15114
-  _QUANTIZATIONTYPE._serialized_end=15167
-  _COMPRESSIONRATIO._serialized_start=15169
-  _COMPRESSIONRATIO._serialized_end=15230
-  _BINARYQUANTIZATIONENCODING._serialized_start=15232
-  _BINARYQUANTIZATIONENCODING._serialized_end=15305
-  _SHARDINGMETHOD._serialized_start=15307
-  _SHARDINGMETHOD._serialized_end=15345
-  _TOKENIZERTYPE._serialized_start=15347
-  _TOKENIZERTYPE._serialized_end=15431
-  _REPLICASTATE._serialized_start=15434
-  _REPLICASTATE._serialized_end=15591
-  _RESHARDINGDIRECTION._serialized_start=15593
-  _RESHARDINGDIRECTION._serialized_end=15632
-  _SHARDTRANSFERMETHOD._serialized_start=15634
-  _SHARDTRANSFERMETHOD._serialized_end=15731
-  _VECTORPARAMS._serialized_start=30
-  _VECTORPARAMS._serialized_end=417
-  _VECTORPARAMSDIFF._serialized_start=420
-  _VECTORPARAMSDIFF._serialized_end=628
-  _VECTORPARAMSMAP._serialized_start=631
-  _VECTORPARAMSMAP._serialized_end=761
-  _VECTORPARAMSMAP_MAPENTRY._serialized_start=697
-  _VECTORPARAMSMAP_MAPENTRY._serialized_end=761
-  _VECTORPARAMSDIFFMAP._serialized_start=764
-  _VECTORPARAMSDIFFMAP._serialized_end=906
-  _VECTORPARAMSDIFFMAP_MAPENTRY._serialized_start=838
-  _VECTORPARAMSDIFFMAP_MAPENTRY._serialized_end=906
-  _VECTORSCONFIG._serialized_start=908
-  _VECTORSCONFIG._serialized_end=1020
-  _VECTORSCONFIGDIFF._serialized_start=1022
-  _VECTORSCONFIGDIFF._serialized_end=1146
-  _SPARSEVECTORPARAMS._serialized_start=1149
-  _SPARSEVECTORPARAMS._serialized_end=1280
-  _SPARSEVECTORCONFIG._serialized_start=1283
-  _SPARSEVECTORCONFIG._serialized_end=1425
-  _SPARSEVECTORCONFIG_MAPENTRY._serialized_start=1355
-  _SPARSEVECTORCONFIG_MAPENTRY._serialized_end=1425
-  _MULTIVECTORCONFIG._serialized_start=1427
-  _MULTIVECTORCONFIG._serialized_end=1497
-  _GETCOLLECTIONINFOREQUEST._serialized_start=1499
-  _GETCOLLECTIONINFOREQUEST._serialized_end=1550
-  _COLLECTIONEXISTSREQUEST._serialized_start=1552
-  _COLLECTIONEXISTSREQUEST._serialized_end=1602
-  _COLLECTIONEXISTS._serialized_start=1604
-  _COLLECTIONEXISTS._serialized_end=1638
-  _COLLECTIONEXISTSRESPONSE._serialized_start=1640
-  _COLLECTIONEXISTSRESPONSE._serialized_end=1722
-  _LISTCOLLECTIONSREQUEST._serialized_start=1724
-  _LISTCOLLECTIONSREQUEST._serialized_end=1748
-  _COLLECTIONDESCRIPTION._serialized_start=1750
-  _COLLECTIONDESCRIPTION._serialized_end=1787
-  _GETCOLLECTIONINFORESPONSE._serialized_start=1789
-  _GETCOLLECTIONINFORESPONSE._serialized_end=1870
-  _LISTCOLLECTIONSRESPONSE._serialized_start=1872
-  _LISTCOLLECTIONSRESPONSE._serialized_end=1963
-  _MAXOPTIMIZATIONTHREADS._serialized_start=1966
-  _MAXOPTIMIZATIONTHREADS._serialized_end=2098
-  _MAXOPTIMIZATIONTHREADS_SETTING._serialized_start=2068
-  _MAXOPTIMIZATIONTHREADS_SETTING._serialized_end=2087
-  _OPTIMIZERSTATUS._serialized_start=2100
-  _OPTIMIZERSTATUS._serialized_end=2144
-  _HNSWCONFIGDIFF._serialized_start=2147
-  _HNSWCONFIGDIFF._serialized_end=2419
-  _SPARSEINDEXCONFIG._serialized_start=2422
-  _SPARSEINDEXCONFIG._serialized_end=2587
-  _WALCONFIGDIFF._serialized_start=2589
-  _WALCONFIGDIFF._serialized_end=2710
-  _OPTIMIZERSCONFIGDIFF._serialized_start=2713
-  _OPTIMIZERSCONFIGDIFF._serialized_end=3327
-  _SCALARQUANTIZATION._serialized_start=3330
-  _SCALARQUANTIZATION._serialized_end=3466
-  _PRODUCTQUANTIZATION._serialized_start=3468
-  _PRODUCTQUANTIZATION._serialized_end=3576
-  _BINARYQUANTIZATIONQUERYENCODING._serialized_start=3579
-  _BINARYQUANTIZATIONQUERYENCODING._serialized_end=3761
-  _BINARYQUANTIZATIONQUERYENCODING_SETTING._serialized_start=3682
-  _BINARYQUANTIZATIONQUERYENCODING_SETTING._serialized_end=3750
-  _BINARYQUANTIZATION._serialized_start=3764
-  _BINARYQUANTIZATION._serialized_end=3985
-  _QUANTIZATIONCONFIG._serialized_start=3988
-  _QUANTIZATIONCONFIG._serialized_end=4164
-  _DISABLED._serialized_start=4166
-  _DISABLED._serialized_end=4176
-  _QUANTIZATIONCONFIGDIFF._serialized_start=4179
-  _QUANTIZATIONCONFIGDIFF._serialized_end=4397
-  _STRICTMODECONFIG._serialized_start=4400
-  _STRICTMODECONFIG._serialized_end=5543
-  _STRICTMODESPARSECONFIG._serialized_start=5546
-  _STRICTMODESPARSECONFIG._serialized_end=5722
-  _STRICTMODESPARSECONFIG_SPARSECONFIGENTRY._serialized_start=5645
-  _STRICTMODESPARSECONFIG_SPARSECONFIGENTRY._serialized_end=5722
-  _STRICTMODESPARSE._serialized_start=5724
-  _STRICTMODESPARSE._serialized_end=5782
-  _STRICTMODEMULTIVECTORCONFIG._serialized_start=5785
-  _STRICTMODEMULTIVECTORCONFIG._serialized_end=5991
-  _STRICTMODEMULTIVECTORCONFIG_MULTIVECTORCONFIGENTRY._serialized_start=5904
-  _STRICTMODEMULTIVECTORCONFIG_MULTIVECTORCONFIGENTRY._serialized_end=5991
-  _STRICTMODEMULTIVECTOR._serialized_start=5993
-  _STRICTMODEMULTIVECTOR._serialized_end=6058
-  _CREATECOLLECTION._serialized_start=6061
-  _CREATECOLLECTION._serialized_end=7044
-  _UPDATECOLLECTION._serialized_start=7047
-  _UPDATECOLLECTION._serialized_end=7673
-  _DELETECOLLECTION._serialized_start=7675
-  _DELETECOLLECTION._serialized_end=7752
-  _COLLECTIONOPERATIONRESPONSE._serialized_start=7754
-  _COLLECTIONOPERATIONRESPONSE._serialized_end=7813
-  _COLLECTIONPARAMS._serialized_start=7816
-  _COLLECTIONPARAMS._serialized_end=8310
-  _COLLECTIONPARAMSDIFF._serialized_start=8313
-  _COLLECTIONPARAMSDIFF._serialized_end=8567
-  _COLLECTIONCONFIG._serialized_start=8570
-  _COLLECTIONCONFIG._serialized_end=8942
-  _KEYWORDINDEXPARAMS._serialized_start=8944
-  _KEYWORDINDEXPARAMS._serialized_end=9036
-  _INTEGERINDEXPARAMS._serialized_start=9039
-  _INTEGERINDEXPARAMS._serialized_end=9199
-  _FLOATINDEXPARAMS._serialized_start=9201
-  _FLOATINDEXPARAMS._serialized_end=9297
-  _GEOINDEXPARAMS._serialized_start=9299
-  _GEOINDEXPARAMS._serialized_end=9349
-  _STOPWORDSSET._serialized_start=9351
-  _STOPWORDSSET._serialized_end=9400
-  _TEXTINDEXPARAMS._serialized_start=9403
-  _TEXTINDEXPARAMS._serialized_end=9797
-  _STEMMINGALGORITHM._serialized_start=9799
-  _STEMMINGALGORITHM._serialized_end=9881
-  _SNOWBALLPARAMS._serialized_start=9883
-  _SNOWBALLPARAMS._serialized_end=9917
-  _BOOLINDEXPARAMS._serialized_start=9919
-  _BOOLINDEXPARAMS._serialized_end=9970
-  _DATETIMEINDEXPARAMS._serialized_start=9972
-  _DATETIMEINDEXPARAMS._serialized_end=10071
-  _UUIDINDEXPARAMS._serialized_start=10073
-  _UUIDINDEXPARAMS._serialized_end=10162
-  _PAYLOADINDEXPARAMS._serialized_start=10165
-  _PAYLOADINDEXPARAMS._serialized_end=10653
-  _PAYLOADSCHEMAINFO._serialized_start=10656
-  _PAYLOADSCHEMAINFO._serialized_end=10813
-  _COLLECTIONINFO._serialized_start=10816
-  _COLLECTIONINFO._serialized_end=11303
-  _COLLECTIONINFO_PAYLOADSCHEMAENTRY._serialized_start=11151
-  _COLLECTIONINFO_PAYLOADSCHEMAENTRY._serialized_end=11230
-  _CHANGEALIASES._serialized_start=11305
-  _CHANGEALIASES._serialized_end=11396
-  _ALIASOPERATIONS._serialized_start=11399
-  _ALIASOPERATIONS._serialized_end=11561
-  _CREATEALIAS._serialized_start=11563
-  _CREATEALIAS._serialized_end=11621
-  _RENAMEALIAS._serialized_start=11623
-  _RENAMEALIAS._serialized_end=11684
-  _DELETEALIAS._serialized_start=11686
-  _DELETEALIAS._serialized_end=11719
-  _LISTALIASESREQUEST._serialized_start=11721
-  _LISTALIASESREQUEST._serialized_end=11741
-  _LISTCOLLECTIONALIASESREQUEST._serialized_start=11743
-  _LISTCOLLECTIONALIASESREQUEST._serialized_end=11798
-  _ALIASDESCRIPTION._serialized_start=11800
-  _ALIASDESCRIPTION._serialized_end=11863
-  _LISTALIASESRESPONSE._serialized_start=11865
-  _LISTALIASESRESPONSE._serialized_end=11943
-  _COLLECTIONCLUSTERINFOREQUEST._serialized_start=11945
-  _COLLECTIONCLUSTERINFOREQUEST._serialized_end=12000
-  _SHARDKEY._serialized_start=12002
-  _SHARDKEY._serialized_end=12056
-  _LOCALSHARDINFO._serialized_start=12059
-  _LOCALSHARDINFO._serialized_end=12208
-  _REMOTESHARDINFO._serialized_start=12211
-  _REMOTESHARDINFO._serialized_end=12356
-  _SHARDTRANSFERINFO._serialized_start=12358
-  _SHARDTRANSFERINFO._serialized_end=12477
-  _RESHARDINGINFO._serialized_start=12480
-  _RESHARDINGINFO._serialized_end=12635
-  _COLLECTIONCLUSTERINFORESPONSE._serialized_start=12638
-  _COLLECTIONCLUSTERINFORESPONSE._serialized_end=12908
-  _MOVESHARD._serialized_start=12911
-  _MOVESHARD._serialized_end=13085
-  _REPLICATESHARD._serialized_start=13088
-  _REPLICATESHARD._serialized_end=13267
-  _ABORTSHARDTRANSFER._serialized_start=13269
-  _ABORTSHARDTRANSFER._serialized_end=13391
-  _RESTARTTRANSFER._serialized_start=13394
-  _RESTARTTRANSFER._serialized_end=13558
-  _REPLICA._serialized_start=13560
-  _REPLICA._serialized_end=13604
-  _CREATESHARDKEY._serialized_start=13607
-  _CREATESHARDKEY._serialized_end=13781
-  _DELETESHARDKEY._serialized_start=13783
-  _DELETESHARDKEY._serialized_end=13836
-  _UPDATECOLLECTIONCLUSTERSETUPREQUEST._serialized_start=13839
-  _UPDATECOLLECTIONCLUSTERSETUPREQUEST._serialized_end=14292
-  _UPDATECOLLECTIONCLUSTERSETUPRESPONSE._serialized_start=14294
-  _UPDATECOLLECTIONCLUSTERSETUPRESPONSE._serialized_end=14348
-  _CREATESHARDKEYREQUEST._serialized_start=14350
-  _CREATESHARDKEYREQUEST._serialized_end=14473
-  _DELETESHARDKEYREQUEST._serialized_start=14475
-  _DELETESHARDKEYREQUEST._serialized_end=14598
-  _CREATESHARDKEYRESPONSE._serialized_start=14600
-  _CREATESHARDKEYRESPONSE._serialized_end=14640
-  _DELETESHARDKEYRESPONSE._serialized_start=14642
-  _DELETESHARDKEYRESPONSE._serialized_end=14682
+  _globals['DESCRIPTOR']._options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\252\002\022Qdrant.Client.Grpc'
+  _globals['_VECTORPARAMSMAP_MAPENTRY']._options = None
+  _globals['_VECTORPARAMSMAP_MAPENTRY']._serialized_options = b'8\001'
+  _globals['_VECTORPARAMSDIFFMAP_MAPENTRY']._options = None
+  _globals['_VECTORPARAMSDIFFMAP_MAPENTRY']._serialized_options = b'8\001'
+  _globals['_SPARSEVECTORCONFIG_MAPENTRY']._options = None
+  _globals['_SPARSEVECTORCONFIG_MAPENTRY']._serialized_options = b'8\001'
+  _globals['_STRICTMODESPARSECONFIG_SPARSECONFIGENTRY']._options = None
+  _globals['_STRICTMODESPARSECONFIG_SPARSECONFIGENTRY']._serialized_options = b'8\001'
+  _globals['_STRICTMODEMULTIVECTORCONFIG_MULTIVECTORCONFIGENTRY']._options = None
+  _globals['_STRICTMODEMULTIVECTORCONFIG_MULTIVECTORCONFIGENTRY']._serialized_options = b'8\001'
+  _globals['_CREATECOLLECTION_METADATAENTRY']._options = None
+  _globals['_CREATECOLLECTION_METADATAENTRY']._serialized_options = b'8\001'
+  _globals['_UPDATECOLLECTION_METADATAENTRY']._options = None
+  _globals['_UPDATECOLLECTION_METADATAENTRY']._serialized_options = b'8\001'
+  _globals['_COLLECTIONCONFIG_METADATAENTRY']._options = None
+  _globals['_COLLECTIONCONFIG_METADATAENTRY']._serialized_options = b'8\001'
+  _globals['_COLLECTIONINFO_PAYLOADSCHEMAENTRY']._options = None
+  _globals['_COLLECTIONINFO_PAYLOADSCHEMAENTRY']._serialized_options = b'8\001'
+  _globals['_DATATYPE']._serialized_start=15343
+  _globals['_DATATYPE']._serialized_end=15403
+  _globals['_MODIFIER']._serialized_start=15405
+  _globals['_MODIFIER']._serialized_end=15434
+  _globals['_MULTIVECTORCOMPARATOR']._serialized_start=15436
+  _globals['_MULTIVECTORCOMPARATOR']._serialized_end=15471
+  _globals['_DISTANCE']._serialized_start=15473
+  _globals['_DISTANCE']._serialized_end=15552
+  _globals['_COLLECTIONSTATUS']._serialized_start=15554
+  _globals['_COLLECTIONSTATUS']._serialized_end=15643
+  _globals['_PAYLOADSCHEMATYPE']._serialized_start=15645
+  _globals['_PAYLOADSCHEMATYPE']._serialized_end=15771
+  _globals['_QUANTIZATIONTYPE']._serialized_start=15773
+  _globals['_QUANTIZATIONTYPE']._serialized_end=15826
+  _globals['_COMPRESSIONRATIO']._serialized_start=15828
+  _globals['_COMPRESSIONRATIO']._serialized_end=15889
+  _globals['_BINARYQUANTIZATIONENCODING']._serialized_start=15891
+  _globals['_BINARYQUANTIZATIONENCODING']._serialized_end=15964
+  _globals['_SHARDINGMETHOD']._serialized_start=15966
+  _globals['_SHARDINGMETHOD']._serialized_end=16004
+  _globals['_TOKENIZERTYPE']._serialized_start=16006
+  _globals['_TOKENIZERTYPE']._serialized_end=16090
+  _globals['_REPLICASTATE']._serialized_start=16093
+  _globals['_REPLICASTATE']._serialized_end=16266
+  _globals['_RESHARDINGDIRECTION']._serialized_start=16268
+  _globals['_RESHARDINGDIRECTION']._serialized_end=16307
+  _globals['_SHARDTRANSFERMETHOD']._serialized_start=16309
+  _globals['_SHARDTRANSFERMETHOD']._serialized_end=16406
+  _globals['_VECTORPARAMS']._serialized_start=51
+  _globals['_VECTORPARAMS']._serialized_end=438
+  _globals['_VECTORPARAMSDIFF']._serialized_start=441
+  _globals['_VECTORPARAMSDIFF']._serialized_end=649
+  _globals['_VECTORPARAMSMAP']._serialized_start=652
+  _globals['_VECTORPARAMSMAP']._serialized_end=782
+  _globals['_VECTORPARAMSMAP_MAPENTRY']._serialized_start=718
+  _globals['_VECTORPARAMSMAP_MAPENTRY']._serialized_end=782
+  _globals['_VECTORPARAMSDIFFMAP']._serialized_start=785
+  _globals['_VECTORPARAMSDIFFMAP']._serialized_end=927
+  _globals['_VECTORPARAMSDIFFMAP_MAPENTRY']._serialized_start=859
+  _globals['_VECTORPARAMSDIFFMAP_MAPENTRY']._serialized_end=927
+  _globals['_VECTORSCONFIG']._serialized_start=929
+  _globals['_VECTORSCONFIG']._serialized_end=1041
+  _globals['_VECTORSCONFIGDIFF']._serialized_start=1043
+  _globals['_VECTORSCONFIGDIFF']._serialized_end=1167
+  _globals['_SPARSEVECTORPARAMS']._serialized_start=1170
+  _globals['_SPARSEVECTORPARAMS']._serialized_end=1301
+  _globals['_SPARSEVECTORCONFIG']._serialized_start=1304
+  _globals['_SPARSEVECTORCONFIG']._serialized_end=1446
+  _globals['_SPARSEVECTORCONFIG_MAPENTRY']._serialized_start=1376
+  _globals['_SPARSEVECTORCONFIG_MAPENTRY']._serialized_end=1446
+  _globals['_MULTIVECTORCONFIG']._serialized_start=1448
+  _globals['_MULTIVECTORCONFIG']._serialized_end=1518
+  _globals['_GETCOLLECTIONINFOREQUEST']._serialized_start=1520
+  _globals['_GETCOLLECTIONINFOREQUEST']._serialized_end=1571
+  _globals['_COLLECTIONEXISTSREQUEST']._serialized_start=1573
+  _globals['_COLLECTIONEXISTSREQUEST']._serialized_end=1623
+  _globals['_COLLECTIONEXISTS']._serialized_start=1625
+  _globals['_COLLECTIONEXISTS']._serialized_end=1659
+  _globals['_COLLECTIONEXISTSRESPONSE']._serialized_start=1661
+  _globals['_COLLECTIONEXISTSRESPONSE']._serialized_end=1743
+  _globals['_LISTCOLLECTIONSREQUEST']._serialized_start=1745
+  _globals['_LISTCOLLECTIONSREQUEST']._serialized_end=1769
+  _globals['_COLLECTIONDESCRIPTION']._serialized_start=1771
+  _globals['_COLLECTIONDESCRIPTION']._serialized_end=1808
+  _globals['_GETCOLLECTIONINFORESPONSE']._serialized_start=1810
+  _globals['_GETCOLLECTIONINFORESPONSE']._serialized_end=1891
+  _globals['_LISTCOLLECTIONSRESPONSE']._serialized_start=1893
+  _globals['_LISTCOLLECTIONSRESPONSE']._serialized_end=1984
+  _globals['_MAXOPTIMIZATIONTHREADS']._serialized_start=1987
+  _globals['_MAXOPTIMIZATIONTHREADS']._serialized_end=2119
+  _globals['_MAXOPTIMIZATIONTHREADS_SETTING']._serialized_start=2089
+  _globals['_MAXOPTIMIZATIONTHREADS_SETTING']._serialized_end=2108
+  _globals['_OPTIMIZERSTATUS']._serialized_start=2121
+  _globals['_OPTIMIZERSTATUS']._serialized_end=2165
+  _globals['_COLLECTIONWARNING']._serialized_start=2167
+  _globals['_COLLECTIONWARNING']._serialized_end=2203
+  _globals['_HNSWCONFIGDIFF']._serialized_start=2206
+  _globals['_HNSWCONFIGDIFF']._serialized_end=2526
+  _globals['_SPARSEINDEXCONFIG']._serialized_start=2529
+  _globals['_SPARSEINDEXCONFIG']._serialized_end=2694
+  _globals['_WALCONFIGDIFF']._serialized_start=2697
+  _globals['_WALCONFIGDIFF']._serialized_end=2872
+  _globals['_OPTIMIZERSCONFIGDIFF']._serialized_start=2875
+  _globals['_OPTIMIZERSCONFIGDIFF']._serialized_end=3489
+  _globals['_SCALARQUANTIZATION']._serialized_start=3492
+  _globals['_SCALARQUANTIZATION']._serialized_end=3628
+  _globals['_PRODUCTQUANTIZATION']._serialized_start=3630
+  _globals['_PRODUCTQUANTIZATION']._serialized_end=3738
+  _globals['_BINARYQUANTIZATIONQUERYENCODING']._serialized_start=3741
+  _globals['_BINARYQUANTIZATIONQUERYENCODING']._serialized_end=3923
+  _globals['_BINARYQUANTIZATIONQUERYENCODING_SETTING']._serialized_start=3844
+  _globals['_BINARYQUANTIZATIONQUERYENCODING_SETTING']._serialized_end=3912
+  _globals['_BINARYQUANTIZATION']._serialized_start=3926
+  _globals['_BINARYQUANTIZATION']._serialized_end=4147
+  _globals['_QUANTIZATIONCONFIG']._serialized_start=4150
+  _globals['_QUANTIZATIONCONFIG']._serialized_end=4326
+  _globals['_DISABLED']._serialized_start=4328
+  _globals['_DISABLED']._serialized_end=4338
+  _globals['_QUANTIZATIONCONFIGDIFF']._serialized_start=4341
+  _globals['_QUANTIZATIONCONFIGDIFF']._serialized_end=4559
+  _globals['_STRICTMODECONFIG']._serialized_start=4562
+  _globals['_STRICTMODECONFIG']._serialized_end=5771
+  _globals['_STRICTMODESPARSECONFIG']._serialized_start=5774
+  _globals['_STRICTMODESPARSECONFIG']._serialized_end=5950
+  _globals['_STRICTMODESPARSECONFIG_SPARSECONFIGENTRY']._serialized_start=5873
+  _globals['_STRICTMODESPARSECONFIG_SPARSECONFIGENTRY']._serialized_end=5950
+  _globals['_STRICTMODESPARSE']._serialized_start=5952
+  _globals['_STRICTMODESPARSE']._serialized_end=6010
+  _globals['_STRICTMODEMULTIVECTORCONFIG']._serialized_start=6013
+  _globals['_STRICTMODEMULTIVECTORCONFIG']._serialized_end=6219
+  _globals['_STRICTMODEMULTIVECTORCONFIG_MULTIVECTORCONFIGENTRY']._serialized_start=6132
+  _globals['_STRICTMODEMULTIVECTORCONFIG_MULTIVECTORCONFIGENTRY']._serialized_end=6219
+  _globals['_STRICTMODEMULTIVECTOR']._serialized_start=6221
+  _globals['_STRICTMODEMULTIVECTOR']._serialized_end=6286
+  _globals['_CREATECOLLECTION']._serialized_start=6289
+  _globals['_CREATECOLLECTION']._serialized_end=7340
+  _globals['_CREATECOLLECTION_METADATAENTRY']._serialized_start=6994
+  _globals['_CREATECOLLECTION_METADATAENTRY']._serialized_end=7056
+  _globals['_UPDATECOLLECTION']._serialized_start=7343
+  _globals['_UPDATECOLLECTION']._serialized_end=8091
+  _globals['_UPDATECOLLECTION_METADATAENTRY']._serialized_start=6994
+  _globals['_UPDATECOLLECTION_METADATAENTRY']._serialized_end=7056
+  _globals['_DELETECOLLECTION']._serialized_start=8093
+  _globals['_DELETECOLLECTION']._serialized_end=8170
+  _globals['_COLLECTIONOPERATIONRESPONSE']._serialized_start=8172
+  _globals['_COLLECTIONOPERATIONRESPONSE']._serialized_end=8231
+  _globals['_COLLECTIONPARAMS']._serialized_start=8234
+  _globals['_COLLECTIONPARAMS']._serialized_end=8728
+  _globals['_COLLECTIONPARAMSDIFF']._serialized_start=8731
+  _globals['_COLLECTIONPARAMSDIFF']._serialized_end=8985
+  _globals['_COLLECTIONCONFIG']._serialized_start=8988
+  _globals['_COLLECTIONCONFIG']._serialized_end=9482
+  _globals['_COLLECTIONCONFIG_METADATAENTRY']._serialized_start=6994
+  _globals['_COLLECTIONCONFIG_METADATAENTRY']._serialized_end=7056
+  _globals['_KEYWORDINDEXPARAMS']._serialized_start=9484
+  _globals['_KEYWORDINDEXPARAMS']._serialized_end=9576
+  _globals['_INTEGERINDEXPARAMS']._serialized_start=9579
+  _globals['_INTEGERINDEXPARAMS']._serialized_end=9739
+  _globals['_FLOATINDEXPARAMS']._serialized_start=9741
+  _globals['_FLOATINDEXPARAMS']._serialized_end=9837
+  _globals['_GEOINDEXPARAMS']._serialized_start=9839
+  _globals['_GEOINDEXPARAMS']._serialized_end=9889
+  _globals['_STOPWORDSSET']._serialized_start=9891
+  _globals['_STOPWORDSSET']._serialized_end=9940
+  _globals['_TEXTINDEXPARAMS']._serialized_start=9943
+  _globals['_TEXTINDEXPARAMS']._serialized_end=10383
+  _globals['_STEMMINGALGORITHM']._serialized_start=10385
+  _globals['_STEMMINGALGORITHM']._serialized_end=10467
+  _globals['_SNOWBALLPARAMS']._serialized_start=10469
+  _globals['_SNOWBALLPARAMS']._serialized_end=10503
+  _globals['_BOOLINDEXPARAMS']._serialized_start=10505
+  _globals['_BOOLINDEXPARAMS']._serialized_end=10556
+  _globals['_DATETIMEINDEXPARAMS']._serialized_start=10558
+  _globals['_DATETIMEINDEXPARAMS']._serialized_end=10657
+  _globals['_UUIDINDEXPARAMS']._serialized_start=10659
+  _globals['_UUIDINDEXPARAMS']._serialized_end=10748
+  _globals['_PAYLOADINDEXPARAMS']._serialized_start=10751
+  _globals['_PAYLOADINDEXPARAMS']._serialized_end=11239
+  _globals['_PAYLOADSCHEMAINFO']._serialized_start=11242
+  _globals['_PAYLOADSCHEMAINFO']._serialized_end=11399
+  _globals['_COLLECTIONINFO']._serialized_start=11402
+  _globals['_COLLECTIONINFO']._serialized_end=11894
+  _globals['_COLLECTIONINFO_PAYLOADSCHEMAENTRY']._serialized_start=11754
+  _globals['_COLLECTIONINFO_PAYLOADSCHEMAENTRY']._serialized_end=11833
+  _globals['_CHANGEALIASES']._serialized_start=11896
+  _globals['_CHANGEALIASES']._serialized_end=11987
+  _globals['_ALIASOPERATIONS']._serialized_start=11990
+  _globals['_ALIASOPERATIONS']._serialized_end=12152
+  _globals['_CREATEALIAS']._serialized_start=12154
+  _globals['_CREATEALIAS']._serialized_end=12212
+  _globals['_RENAMEALIAS']._serialized_start=12214
+  _globals['_RENAMEALIAS']._serialized_end=12275
+  _globals['_DELETEALIAS']._serialized_start=12277
+ 
_globals['_DELETEALIAS']._serialized_end=12310 + _globals['_LISTALIASESREQUEST']._serialized_start=12312 + _globals['_LISTALIASESREQUEST']._serialized_end=12332 + _globals['_LISTCOLLECTIONALIASESREQUEST']._serialized_start=12334 + _globals['_LISTCOLLECTIONALIASESREQUEST']._serialized_end=12389 + _globals['_ALIASDESCRIPTION']._serialized_start=12391 + _globals['_ALIASDESCRIPTION']._serialized_end=12454 + _globals['_LISTALIASESRESPONSE']._serialized_start=12456 + _globals['_LISTALIASESRESPONSE']._serialized_end=12534 + _globals['_COLLECTIONCLUSTERINFOREQUEST']._serialized_start=12536 + _globals['_COLLECTIONCLUSTERINFOREQUEST']._serialized_end=12591 + _globals['_SHARDKEY']._serialized_start=12593 + _globals['_SHARDKEY']._serialized_end=12647 + _globals['_LOCALSHARDINFO']._serialized_start=12650 + _globals['_LOCALSHARDINFO']._serialized_end=12799 + _globals['_REMOTESHARDINFO']._serialized_start=12802 + _globals['_REMOTESHARDINFO']._serialized_end=12947 + _globals['_SHARDTRANSFERINFO']._serialized_start=12949 + _globals['_SHARDTRANSFERINFO']._serialized_end=13068 + _globals['_RESHARDINGINFO']._serialized_start=13071 + _globals['_RESHARDINGINFO']._serialized_end=13226 + _globals['_COLLECTIONCLUSTERINFORESPONSE']._serialized_start=13229 + _globals['_COLLECTIONCLUSTERINFORESPONSE']._serialized_end=13499 + _globals['_MOVESHARD']._serialized_start=13502 + _globals['_MOVESHARD']._serialized_end=13676 + _globals['_REPLICATESHARD']._serialized_start=13679 + _globals['_REPLICATESHARD']._serialized_end=13858 + _globals['_ABORTSHARDTRANSFER']._serialized_start=13860 + _globals['_ABORTSHARDTRANSFER']._serialized_end=13982 + _globals['_RESTARTTRANSFER']._serialized_start=13985 + _globals['_RESTARTTRANSFER']._serialized_end=14149 + _globals['_REPLICA']._serialized_start=14151 + _globals['_REPLICA']._serialized_end=14195 + _globals['_CREATESHARDKEY']._serialized_start=14198 + _globals['_CREATESHARDKEY']._serialized_end=14440 + _globals['_DELETESHARDKEY']._serialized_start=14442 + _globals['_DELETESHARDKEY']._serialized_end=14495 + _globals['_UPDATECOLLECTIONCLUSTERSETUPREQUEST']._serialized_start=14498 + _globals['_UPDATECOLLECTIONCLUSTERSETUPREQUEST']._serialized_end=14951 + _globals['_UPDATECOLLECTIONCLUSTERSETUPRESPONSE']._serialized_start=14953 + _globals['_UPDATECOLLECTIONCLUSTERSETUPRESPONSE']._serialized_end=15007 + _globals['_CREATESHARDKEYREQUEST']._serialized_start=15009 + _globals['_CREATESHARDKEYREQUEST']._serialized_end=15132 + _globals['_DELETESHARDKEYREQUEST']._serialized_start=15134 + _globals['_DELETESHARDKEYREQUEST']._serialized_end=15257 + _globals['_CREATESHARDKEYRESPONSE']._serialized_start=15259 + _globals['_CREATESHARDKEYRESPONSE']._serialized_end=15299 + _globals['_DELETESHARDKEYRESPONSE']._serialized_start=15301 + _globals['_DELETESHARDKEYRESPONSE']._serialized_end=15341 # @@protoc_insertion_point(module_scope) diff --git a/qdrant_client/grpc/collections_pb2.pyi b/qdrant_client/grpc/collections_pb2.pyi index 108792fb..a031e536 100644 --- a/qdrant_client/grpc/collections_pb2.pyi +++ b/qdrant_client/grpc/collections_pb2.pyi @@ -8,6 +8,7 @@ import google.protobuf.descriptor import google.protobuf.internal.containers import google.protobuf.internal.enum_type_wrapper import google.protobuf.message +import json_with_int_pb2 import sys import typing @@ -261,6 +262,8 @@ class _ReplicaStateEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._E """Points are being migrated to this shard as part of scale-up resharding""" ReshardingScaleDown: _ReplicaState.ValueType # 8 """Points are being 
 migrated to this shard as part of scale-down resharding"""
+    ActiveRead: _ReplicaState.ValueType  # 9
+    """Active for readers, Partial for writers"""

 class ReplicaState(_ReplicaState, metaclass=_ReplicaStateEnumTypeWrapper): ...

@@ -282,6 +285,8 @@
 Resharding: ReplicaState.ValueType  # 7
 """Points are being migrated to this shard as part of scale-up resharding"""
 ReshardingScaleDown: ReplicaState.ValueType  # 8
 """Points are being migrated to this shard as part of scale-down resharding"""
+ActiveRead: ReplicaState.ValueType  # 9
+"""Active for readers, Partial for writers"""
 global___ReplicaState = ReplicaState

 class _ReshardingDirection:
@@ -769,6 +774,20 @@ class OptimizerStatus(google.protobuf.message.Message):

 global___OptimizerStatus = OptimizerStatus

+class CollectionWarning(google.protobuf.message.Message):
+    DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+    MESSAGE_FIELD_NUMBER: builtins.int
+    message: builtins.str
+    def __init__(
+        self,
+        *,
+        message: builtins.str = ...,
+    ) -> None: ...
+    def ClearField(self, field_name: typing_extensions.Literal["message", b"message"]) -> None: ...
+
+global___CollectionWarning = CollectionWarning
+
 class HnswConfigDiff(google.protobuf.message.Message):
     DESCRIPTOR: google.protobuf.descriptor.Descriptor

@@ -778,6 +797,7 @@ class HnswConfigDiff(google.protobuf.message.Message):
     MAX_INDEXING_THREADS_FIELD_NUMBER: builtins.int
     ON_DISK_FIELD_NUMBER: builtins.int
     PAYLOAD_M_FIELD_NUMBER: builtins.int
+    INLINE_STORAGE_FIELD_NUMBER: builtins.int
     m: builtins.int
     """
     Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.
     """
@@ -788,10 +808,12 @@ class HnswConfigDiff(google.protobuf.message.Message):
     """
     full_scan_threshold: builtins.int
     """
-    Minimal size (in KiloBytes) of vectors for additional payload-based indexing.
-    If the payload chunk is smaller than `full_scan_threshold` additional indexing won't be used -
-    in this case full-scan search should be preferred by query planner and additional indexing is not required.
-    Note: 1 Kb = 1 vector of size 256
+    Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search.
+    This measures the total size of vectors being queried against.
+    When the maximum estimated number of points that a condition satisfies is smaller than
+    `full_scan_threshold`, the query planner will use full-scan search instead of HNSW index
+    traversal for better performance.
+    Note: 1 Kb = 1 vector of size 256
     """
     max_indexing_threads: builtins.int
     """
@@ -808,6 +830,13 @@ class HnswConfigDiff(google.protobuf.message.Message):
     """
     Number of additional payload-aware links per node in the index graph. If not set - regular M parameter will be used.
     """
+    inline_storage: builtins.bool
+    """
+    Store copies of original and quantized vectors within the HNSW index file. Default: false.
+    Enabling this option trades disk usage for search speed by reducing the number of
+    random seeks during search.
+    Requires quantized vectors to be enabled. Multi-vectors are not supported.
+    """
    def __init__(
        self,
        *,
@@ -817,14 +846,17 @@ class HnswConfigDiff(google.protobuf.message.Message):
         max_indexing_threads: builtins.int | None = ...,
         on_disk: builtins.bool | None = ...,
         payload_m: builtins.int | None = ...,
+        inline_storage: builtins.bool | None = ...,
     ) -> None: ...
- def HasField(self, field_name: typing_extensions.Literal["_ef_construct", b"_ef_construct", "_full_scan_threshold", b"_full_scan_threshold", "_m", b"_m", "_max_indexing_threads", b"_max_indexing_threads", "_on_disk", b"_on_disk", "_payload_m", b"_payload_m", "ef_construct", b"ef_construct", "full_scan_threshold", b"full_scan_threshold", "m", b"m", "max_indexing_threads", b"max_indexing_threads", "on_disk", b"on_disk", "payload_m", b"payload_m"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["_ef_construct", b"_ef_construct", "_full_scan_threshold", b"_full_scan_threshold", "_m", b"_m", "_max_indexing_threads", b"_max_indexing_threads", "_on_disk", b"_on_disk", "_payload_m", b"_payload_m", "ef_construct", b"ef_construct", "full_scan_threshold", b"full_scan_threshold", "m", b"m", "max_indexing_threads", b"max_indexing_threads", "on_disk", b"on_disk", "payload_m", b"payload_m"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["_ef_construct", b"_ef_construct", "_full_scan_threshold", b"_full_scan_threshold", "_inline_storage", b"_inline_storage", "_m", b"_m", "_max_indexing_threads", b"_max_indexing_threads", "_on_disk", b"_on_disk", "_payload_m", b"_payload_m", "ef_construct", b"ef_construct", "full_scan_threshold", b"full_scan_threshold", "inline_storage", b"inline_storage", "m", b"m", "max_indexing_threads", b"max_indexing_threads", "on_disk", b"on_disk", "payload_m", b"payload_m"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["_ef_construct", b"_ef_construct", "_full_scan_threshold", b"_full_scan_threshold", "_inline_storage", b"_inline_storage", "_m", b"_m", "_max_indexing_threads", b"_max_indexing_threads", "_on_disk", b"_on_disk", "_payload_m", b"_payload_m", "ef_construct", b"ef_construct", "full_scan_threshold", b"full_scan_threshold", "inline_storage", b"inline_storage", "m", b"m", "max_indexing_threads", b"max_indexing_threads", "on_disk", b"on_disk", "payload_m", b"payload_m"]) -> None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_ef_construct", b"_ef_construct"]) -> typing_extensions.Literal["ef_construct"] | None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_full_scan_threshold", b"_full_scan_threshold"]) -> typing_extensions.Literal["full_scan_threshold"] | None: ... @typing.overload + def WhichOneof(self, oneof_group: typing_extensions.Literal["_inline_storage", b"_inline_storage"]) -> typing_extensions.Literal["inline_storage"] | None: ... + @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_m", b"_m"]) -> typing_extensions.Literal["m"] | None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_max_indexing_threads", b"_max_indexing_threads"]) -> typing_extensions.Literal["max_indexing_threads"] | None: ... @@ -877,21 +909,27 @@ class WalConfigDiff(google.protobuf.message.Message): WAL_CAPACITY_MB_FIELD_NUMBER: builtins.int WAL_SEGMENTS_AHEAD_FIELD_NUMBER: builtins.int + WAL_RETAIN_CLOSED_FIELD_NUMBER: builtins.int wal_capacity_mb: builtins.int """Size of a single WAL block file""" wal_segments_ahead: builtins.int """Number of segments to create in advance""" + wal_retain_closed: builtins.int + """Number of closed segments to retain""" def __init__( self, *, wal_capacity_mb: builtins.int | None = ..., wal_segments_ahead: builtins.int | None = ..., + wal_retain_closed: builtins.int | None = ..., ) -> None: ... 
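
# A minimal sketch, assuming the generated module is importable as
# qdrant_client.grpc.collections_pb2, of setting the two new optional fields
# stubbed above (HnswConfigDiff.inline_storage and WalConfigDiff.wal_retain_closed).
# The values are illustrative only:
from qdrant_client.grpc import collections_pb2

hnsw = collections_pb2.HnswConfigDiff(m=16, inline_storage=True)
wal = collections_pb2.WalConfigDiff(wal_capacity_mb=32, wal_retain_closed=2)

# Both are proto3 optional fields, so presence is tracked explicitly.
assert hnsw.HasField("inline_storage")
assert wal.HasField("wal_retain_closed")
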
-    def HasField(self, field_name: typing_extensions.Literal["_wal_capacity_mb", b"_wal_capacity_mb", "_wal_segments_ahead", b"_wal_segments_ahead", "wal_capacity_mb", b"wal_capacity_mb", "wal_segments_ahead", b"wal_segments_ahead"]) -> builtins.bool: ...
-    def ClearField(self, field_name: typing_extensions.Literal["_wal_capacity_mb", b"_wal_capacity_mb", "_wal_segments_ahead", b"_wal_segments_ahead", "wal_capacity_mb", b"wal_capacity_mb", "wal_segments_ahead", b"wal_segments_ahead"]) -> None: ...
+    def HasField(self, field_name: typing_extensions.Literal["_wal_capacity_mb", b"_wal_capacity_mb", "_wal_retain_closed", b"_wal_retain_closed", "_wal_segments_ahead", b"_wal_segments_ahead", "wal_capacity_mb", b"wal_capacity_mb", "wal_retain_closed", b"wal_retain_closed", "wal_segments_ahead", b"wal_segments_ahead"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing_extensions.Literal["_wal_capacity_mb", b"_wal_capacity_mb", "_wal_retain_closed", b"_wal_retain_closed", "_wal_segments_ahead", b"_wal_segments_ahead", "wal_capacity_mb", b"wal_capacity_mb", "wal_retain_closed", b"wal_retain_closed", "wal_segments_ahead", b"wal_segments_ahead"]) -> None: ...
     @typing.overload
     def WhichOneof(self, oneof_group: typing_extensions.Literal["_wal_capacity_mb", b"_wal_capacity_mb"]) -> typing_extensions.Literal["wal_capacity_mb"] | None: ...
     @typing.overload
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["_wal_retain_closed", b"_wal_retain_closed"]) -> typing_extensions.Literal["wal_retain_closed"] | None: ...
+    @typing.overload
     def WhichOneof(self, oneof_group: typing_extensions.Literal["_wal_segments_ahead", b"_wal_segments_ahead"]) -> typing_extensions.Literal["wal_segments_ahead"] | None: ...

 global___WalConfigDiff = WalConfigDiff

@@ -929,6 +967,8 @@ class OptimizersConfigDiff(google.protobuf.message.Message):
     """
     max_segment_size: builtins.int
     """
+    Deprecated:
+
     Do not create segments larger than this size (in kilobytes).
     Large segments might require disproportionately long indexation times,
     therefore it makes sense to limit the size of segments.
@@ -1209,28 +1249,47 @@ class StrictModeConfig(google.protobuf.message.Message):
     MULTIVECTOR_CONFIG_FIELD_NUMBER: builtins.int
     SPARSE_CONFIG_FIELD_NUMBER: builtins.int
     MAX_POINTS_COUNT_FIELD_NUMBER: builtins.int
+    MAX_PAYLOAD_INDEX_COUNT_FIELD_NUMBER: builtins.int
     enabled: builtins.bool
+    """Whether strict mode is enabled for a collection or not."""
     max_query_limit: builtins.int
+    """Max allowed `limit` parameter for all APIs that don't have their own max limit."""
     max_timeout: builtins.int
+    """Max allowed `timeout` parameter."""
     unindexed_filtering_retrieve: builtins.bool
+    """Allow usage of unindexed fields in retrieval-based (e.g. search) filters."""
     unindexed_filtering_update: builtins.bool
+    """Allow usage of unindexed fields in filtered updates (e.g. delete by payload)."""
     search_max_hnsw_ef: builtins.int
+    """Max HNSW ef value allowed in search parameters."""
     search_allow_exact: builtins.bool
+    """Whether exact search is allowed."""
     search_max_oversampling: builtins.float
+    """Max oversampling value allowed in search."""
     upsert_max_batchsize: builtins.int
+    """Max batch size when upserting."""
     max_collection_vector_size_bytes: builtins.int
+    """Max size of a collection's vector storage in bytes, ignoring replicas."""
     read_rate_limit: builtins.int
     """Max number of read operations per minute per replica"""
     write_rate_limit: builtins.int
     """Max number of write operations per minute per replica"""
     max_collection_payload_size_bytes: builtins.int
+    """Max size of a collection's payload storage in bytes, ignoring replicas."""
     filter_max_conditions: builtins.int
+    """Max conditions a filter can have."""
     condition_max_size: builtins.int
+    """Max size of a condition, e.g. items in `MatchAny`."""
     @property
-    def multivector_config(self) -> global___StrictModeMultivectorConfig: ...
+    def multivector_config(self) -> global___StrictModeMultivectorConfig:
+        """Multivector strict mode configuration"""
     @property
-    def sparse_config(self) -> global___StrictModeSparseConfig: ...
+    def sparse_config(self) -> global___StrictModeSparseConfig:
+        """Sparse vector strict mode configuration"""
     max_points_count: builtins.int
+    """Max number of points estimated in a collection"""
+    max_payload_index_count: builtins.int
+    """Max number of payload indexes in a collection"""
     def __init__(
         self,
         *,
@@ -1252,9 +1311,10 @@ class StrictModeConfig(google.protobuf.message.Message):
         multivector_config: global___StrictModeMultivectorConfig | None = ...,
         sparse_config: global___StrictModeSparseConfig | None = ...,
         max_points_count: builtins.int | None = ...,
+        max_payload_index_count: builtins.int | None = ...,
     ) -> None: ...
- def HasField(self, field_name: typing_extensions.Literal["_condition_max_size", b"_condition_max_size", "_enabled", b"_enabled", "_filter_max_conditions", b"_filter_max_conditions", "_max_collection_payload_size_bytes", b"_max_collection_payload_size_bytes", "_max_collection_vector_size_bytes", b"_max_collection_vector_size_bytes", "_max_points_count", b"_max_points_count", "_max_query_limit", b"_max_query_limit", "_max_timeout", b"_max_timeout", "_multivector_config", b"_multivector_config", "_read_rate_limit", b"_read_rate_limit", "_search_allow_exact", b"_search_allow_exact", "_search_max_hnsw_ef", b"_search_max_hnsw_ef", "_search_max_oversampling", b"_search_max_oversampling", "_sparse_config", b"_sparse_config", "_unindexed_filtering_retrieve", b"_unindexed_filtering_retrieve", "_unindexed_filtering_update", b"_unindexed_filtering_update", "_upsert_max_batchsize", b"_upsert_max_batchsize", "_write_rate_limit", b"_write_rate_limit", "condition_max_size", b"condition_max_size", "enabled", b"enabled", "filter_max_conditions", b"filter_max_conditions", "max_collection_payload_size_bytes", b"max_collection_payload_size_bytes", "max_collection_vector_size_bytes", b"max_collection_vector_size_bytes", "max_points_count", b"max_points_count", "max_query_limit", b"max_query_limit", "max_timeout", b"max_timeout", "multivector_config", b"multivector_config", "read_rate_limit", b"read_rate_limit", "search_allow_exact", b"search_allow_exact", "search_max_hnsw_ef", b"search_max_hnsw_ef", "search_max_oversampling", b"search_max_oversampling", "sparse_config", b"sparse_config", "unindexed_filtering_retrieve", b"unindexed_filtering_retrieve", "unindexed_filtering_update", b"unindexed_filtering_update", "upsert_max_batchsize", b"upsert_max_batchsize", "write_rate_limit", b"write_rate_limit"]) -> builtins.bool: ... 
- def ClearField(self, field_name: typing_extensions.Literal["_condition_max_size", b"_condition_max_size", "_enabled", b"_enabled", "_filter_max_conditions", b"_filter_max_conditions", "_max_collection_payload_size_bytes", b"_max_collection_payload_size_bytes", "_max_collection_vector_size_bytes", b"_max_collection_vector_size_bytes", "_max_points_count", b"_max_points_count", "_max_query_limit", b"_max_query_limit", "_max_timeout", b"_max_timeout", "_multivector_config", b"_multivector_config", "_read_rate_limit", b"_read_rate_limit", "_search_allow_exact", b"_search_allow_exact", "_search_max_hnsw_ef", b"_search_max_hnsw_ef", "_search_max_oversampling", b"_search_max_oversampling", "_sparse_config", b"_sparse_config", "_unindexed_filtering_retrieve", b"_unindexed_filtering_retrieve", "_unindexed_filtering_update", b"_unindexed_filtering_update", "_upsert_max_batchsize", b"_upsert_max_batchsize", "_write_rate_limit", b"_write_rate_limit", "condition_max_size", b"condition_max_size", "enabled", b"enabled", "filter_max_conditions", b"filter_max_conditions", "max_collection_payload_size_bytes", b"max_collection_payload_size_bytes", "max_collection_vector_size_bytes", b"max_collection_vector_size_bytes", "max_points_count", b"max_points_count", "max_query_limit", b"max_query_limit", "max_timeout", b"max_timeout", "multivector_config", b"multivector_config", "read_rate_limit", b"read_rate_limit", "search_allow_exact", b"search_allow_exact", "search_max_hnsw_ef", b"search_max_hnsw_ef", "search_max_oversampling", b"search_max_oversampling", "sparse_config", b"sparse_config", "unindexed_filtering_retrieve", b"unindexed_filtering_retrieve", "unindexed_filtering_update", b"unindexed_filtering_update", "upsert_max_batchsize", b"upsert_max_batchsize", "write_rate_limit", b"write_rate_limit"]) -> None: ... 
+ def HasField(self, field_name: typing_extensions.Literal["_condition_max_size", b"_condition_max_size", "_enabled", b"_enabled", "_filter_max_conditions", b"_filter_max_conditions", "_max_collection_payload_size_bytes", b"_max_collection_payload_size_bytes", "_max_collection_vector_size_bytes", b"_max_collection_vector_size_bytes", "_max_payload_index_count", b"_max_payload_index_count", "_max_points_count", b"_max_points_count", "_max_query_limit", b"_max_query_limit", "_max_timeout", b"_max_timeout", "_multivector_config", b"_multivector_config", "_read_rate_limit", b"_read_rate_limit", "_search_allow_exact", b"_search_allow_exact", "_search_max_hnsw_ef", b"_search_max_hnsw_ef", "_search_max_oversampling", b"_search_max_oversampling", "_sparse_config", b"_sparse_config", "_unindexed_filtering_retrieve", b"_unindexed_filtering_retrieve", "_unindexed_filtering_update", b"_unindexed_filtering_update", "_upsert_max_batchsize", b"_upsert_max_batchsize", "_write_rate_limit", b"_write_rate_limit", "condition_max_size", b"condition_max_size", "enabled", b"enabled", "filter_max_conditions", b"filter_max_conditions", "max_collection_payload_size_bytes", b"max_collection_payload_size_bytes", "max_collection_vector_size_bytes", b"max_collection_vector_size_bytes", "max_payload_index_count", b"max_payload_index_count", "max_points_count", b"max_points_count", "max_query_limit", b"max_query_limit", "max_timeout", b"max_timeout", "multivector_config", b"multivector_config", "read_rate_limit", b"read_rate_limit", "search_allow_exact", b"search_allow_exact", "search_max_hnsw_ef", b"search_max_hnsw_ef", "search_max_oversampling", b"search_max_oversampling", "sparse_config", b"sparse_config", "unindexed_filtering_retrieve", b"unindexed_filtering_retrieve", "unindexed_filtering_update", b"unindexed_filtering_update", "upsert_max_batchsize", b"upsert_max_batchsize", "write_rate_limit", b"write_rate_limit"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["_condition_max_size", b"_condition_max_size", "_enabled", b"_enabled", "_filter_max_conditions", b"_filter_max_conditions", "_max_collection_payload_size_bytes", b"_max_collection_payload_size_bytes", "_max_collection_vector_size_bytes", b"_max_collection_vector_size_bytes", "_max_payload_index_count", b"_max_payload_index_count", "_max_points_count", b"_max_points_count", "_max_query_limit", b"_max_query_limit", "_max_timeout", b"_max_timeout", "_multivector_config", b"_multivector_config", "_read_rate_limit", b"_read_rate_limit", "_search_allow_exact", b"_search_allow_exact", "_search_max_hnsw_ef", b"_search_max_hnsw_ef", "_search_max_oversampling", b"_search_max_oversampling", "_sparse_config", b"_sparse_config", "_unindexed_filtering_retrieve", b"_unindexed_filtering_retrieve", "_unindexed_filtering_update", b"_unindexed_filtering_update", "_upsert_max_batchsize", b"_upsert_max_batchsize", "_write_rate_limit", b"_write_rate_limit", "condition_max_size", b"condition_max_size", "enabled", b"enabled", "filter_max_conditions", b"filter_max_conditions", "max_collection_payload_size_bytes", b"max_collection_payload_size_bytes", "max_collection_vector_size_bytes", b"max_collection_vector_size_bytes", "max_payload_index_count", b"max_payload_index_count", "max_points_count", b"max_points_count", "max_query_limit", b"max_query_limit", "max_timeout", b"max_timeout", "multivector_config", b"multivector_config", "read_rate_limit", b"read_rate_limit", "search_allow_exact", b"search_allow_exact", "search_max_hnsw_ef", b"search_max_hnsw_ef", "search_max_oversampling", b"search_max_oversampling", "sparse_config", b"sparse_config", "unindexed_filtering_retrieve", b"unindexed_filtering_retrieve", "unindexed_filtering_update", b"unindexed_filtering_update", "upsert_max_batchsize", b"upsert_max_batchsize", "write_rate_limit", b"write_rate_limit"]) -> None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_condition_max_size", b"_condition_max_size"]) -> typing_extensions.Literal["condition_max_size"] | None: ... @typing.overload @@ -1266,6 +1326,8 @@ class StrictModeConfig(google.protobuf.message.Message): @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_max_collection_vector_size_bytes", b"_max_collection_vector_size_bytes"]) -> typing_extensions.Literal["max_collection_vector_size_bytes"] | None: ... @typing.overload + def WhichOneof(self, oneof_group: typing_extensions.Literal["_max_payload_index_count", b"_max_payload_index_count"]) -> typing_extensions.Literal["max_payload_index_count"] | None: ... + @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_max_points_count", b"_max_points_count"]) -> typing_extensions.Literal["max_points_count"] | None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_max_query_limit", b"_max_query_limit"]) -> typing_extensions.Literal["max_query_limit"] | None: ... 
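
# A minimal sketch of the new strict-mode limit above. Since
# max_payload_index_count is a proto3 optional field, an unset value stays
# distinguishable from zero; the limit of 8 here is an arbitrary illustration:
from qdrant_client.grpc import collections_pb2

cfg = collections_pb2.StrictModeConfig(enabled=True, max_payload_index_count=8)
assert cfg.HasField("max_payload_index_count")

cfg.ClearField("max_payload_index_count")
assert not cfg.HasField("max_payload_index_count")
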
@@ -1331,6 +1393,7 @@ class StrictModeSparse(google.protobuf.message.Message): MAX_LENGTH_FIELD_NUMBER: builtins.int max_length: builtins.int + """Max length of sparse vector""" def __init__( self, *, @@ -1379,6 +1442,7 @@ class StrictModeMultivector(google.protobuf.message.Message): MAX_VECTORS_FIELD_NUMBER: builtins.int max_vectors: builtins.int + """Max number of vectors in a multivector""" def __init__( self, *, @@ -1393,6 +1457,23 @@ global___StrictModeMultivector = StrictModeMultivector class CreateCollection(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor + class MetadataEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + @property + def value(self) -> json_with_int_pb2.Value: ... + def __init__( + self, + *, + key: builtins.str = ..., + value: json_with_int_pb2.Value | None = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value", b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ... + COLLECTION_NAME_FIELD_NUMBER: builtins.int HNSW_CONFIG_FIELD_NUMBER: builtins.int WAL_CONFIG_FIELD_NUMBER: builtins.int @@ -1403,11 +1484,11 @@ class CreateCollection(google.protobuf.message.Message): VECTORS_CONFIG_FIELD_NUMBER: builtins.int REPLICATION_FACTOR_FIELD_NUMBER: builtins.int WRITE_CONSISTENCY_FACTOR_FIELD_NUMBER: builtins.int - INIT_FROM_COLLECTION_FIELD_NUMBER: builtins.int QUANTIZATION_CONFIG_FIELD_NUMBER: builtins.int SHARDING_METHOD_FIELD_NUMBER: builtins.int SPARSE_VECTORS_CONFIG_FIELD_NUMBER: builtins.int STRICT_MODE_CONFIG_FIELD_NUMBER: builtins.int + METADATA_FIELD_NUMBER: builtins.int collection_name: builtins.str """Name of the collection""" @property @@ -1432,8 +1513,6 @@ class CreateCollection(google.protobuf.message.Message): """Number of replicas of each shard that network tries to maintain, default = 1""" write_consistency_factor: builtins.int """How many replicas should apply the operation for us to consider it successful, default = 1""" - init_from_collection: builtins.str - """Specify name of the other collection to copy data from""" @property def quantization_config(self) -> global___QuantizationConfig: """Quantization configuration of vector""" @@ -1445,6 +1524,9 @@ class CreateCollection(google.protobuf.message.Message): @property def strict_mode_config(self) -> global___StrictModeConfig: """Configuration for strict mode""" + @property + def metadata(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, json_with_int_pb2.Value]: + """Arbitrary JSON metadata for the collection""" def __init__( self, *, @@ -1458,19 +1540,17 @@ class CreateCollection(google.protobuf.message.Message): vectors_config: global___VectorsConfig | None = ..., replication_factor: builtins.int | None = ..., write_consistency_factor: builtins.int | None = ..., - init_from_collection: builtins.str | None = ..., quantization_config: global___QuantizationConfig | None = ..., sharding_method: global___ShardingMethod.ValueType | None = ..., sparse_vectors_config: global___SparseVectorConfig | None = ..., strict_mode_config: global___StrictModeConfig | None = ..., + metadata: collections.abc.Mapping[builtins.str, json_with_int_pb2.Value] | None = ..., ) -> None: ... 
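
# A minimal sketch of the new metadata map on CreateCollection; entries are
# qdrant's JSON-with-int Value messages rather than plain Python objects, so
# each value is wrapped explicitly. Collection name and keys are illustrative.
# (Note that the init_from_collection field no longer exists on this message.)
from qdrant_client.grpc import collections_pb2, json_with_int_pb2

request = collections_pb2.CreateCollection(
    collection_name="demo",
    metadata={
        "owner": json_with_int_pb2.Value(string_value="search-team"),
        "schema_version": json_with_int_pb2.Value(integer_value=3),
    },
)
assert request.metadata["owner"].string_value == "search-team"
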
- def HasField(self, field_name: typing_extensions.Literal["_hnsw_config", b"_hnsw_config", "_init_from_collection", b"_init_from_collection", "_on_disk_payload", b"_on_disk_payload", "_optimizers_config", b"_optimizers_config", "_quantization_config", b"_quantization_config", "_replication_factor", b"_replication_factor", "_shard_number", b"_shard_number", "_sharding_method", b"_sharding_method", "_sparse_vectors_config", b"_sparse_vectors_config", "_strict_mode_config", b"_strict_mode_config", "_timeout", b"_timeout", "_vectors_config", b"_vectors_config", "_wal_config", b"_wal_config", "_write_consistency_factor", b"_write_consistency_factor", "hnsw_config", b"hnsw_config", "init_from_collection", b"init_from_collection", "on_disk_payload", b"on_disk_payload", "optimizers_config", b"optimizers_config", "quantization_config", b"quantization_config", "replication_factor", b"replication_factor", "shard_number", b"shard_number", "sharding_method", b"sharding_method", "sparse_vectors_config", b"sparse_vectors_config", "strict_mode_config", b"strict_mode_config", "timeout", b"timeout", "vectors_config", b"vectors_config", "wal_config", b"wal_config", "write_consistency_factor", b"write_consistency_factor"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["_hnsw_config", b"_hnsw_config", "_init_from_collection", b"_init_from_collection", "_on_disk_payload", b"_on_disk_payload", "_optimizers_config", b"_optimizers_config", "_quantization_config", b"_quantization_config", "_replication_factor", b"_replication_factor", "_shard_number", b"_shard_number", "_sharding_method", b"_sharding_method", "_sparse_vectors_config", b"_sparse_vectors_config", "_strict_mode_config", b"_strict_mode_config", "_timeout", b"_timeout", "_vectors_config", b"_vectors_config", "_wal_config", b"_wal_config", "_write_consistency_factor", b"_write_consistency_factor", "collection_name", b"collection_name", "hnsw_config", b"hnsw_config", "init_from_collection", b"init_from_collection", "on_disk_payload", b"on_disk_payload", "optimizers_config", b"optimizers_config", "quantization_config", b"quantization_config", "replication_factor", b"replication_factor", "shard_number", b"shard_number", "sharding_method", b"sharding_method", "sparse_vectors_config", b"sparse_vectors_config", "strict_mode_config", b"strict_mode_config", "timeout", b"timeout", "vectors_config", b"vectors_config", "wal_config", b"wal_config", "write_consistency_factor", b"write_consistency_factor"]) -> None: ... 
+ def HasField(self, field_name: typing_extensions.Literal["_hnsw_config", b"_hnsw_config", "_on_disk_payload", b"_on_disk_payload", "_optimizers_config", b"_optimizers_config", "_quantization_config", b"_quantization_config", "_replication_factor", b"_replication_factor", "_shard_number", b"_shard_number", "_sharding_method", b"_sharding_method", "_sparse_vectors_config", b"_sparse_vectors_config", "_strict_mode_config", b"_strict_mode_config", "_timeout", b"_timeout", "_vectors_config", b"_vectors_config", "_wal_config", b"_wal_config", "_write_consistency_factor", b"_write_consistency_factor", "hnsw_config", b"hnsw_config", "on_disk_payload", b"on_disk_payload", "optimizers_config", b"optimizers_config", "quantization_config", b"quantization_config", "replication_factor", b"replication_factor", "shard_number", b"shard_number", "sharding_method", b"sharding_method", "sparse_vectors_config", b"sparse_vectors_config", "strict_mode_config", b"strict_mode_config", "timeout", b"timeout", "vectors_config", b"vectors_config", "wal_config", b"wal_config", "write_consistency_factor", b"write_consistency_factor"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["_hnsw_config", b"_hnsw_config", "_on_disk_payload", b"_on_disk_payload", "_optimizers_config", b"_optimizers_config", "_quantization_config", b"_quantization_config", "_replication_factor", b"_replication_factor", "_shard_number", b"_shard_number", "_sharding_method", b"_sharding_method", "_sparse_vectors_config", b"_sparse_vectors_config", "_strict_mode_config", b"_strict_mode_config", "_timeout", b"_timeout", "_vectors_config", b"_vectors_config", "_wal_config", b"_wal_config", "_write_consistency_factor", b"_write_consistency_factor", "collection_name", b"collection_name", "hnsw_config", b"hnsw_config", "metadata", b"metadata", "on_disk_payload", b"on_disk_payload", "optimizers_config", b"optimizers_config", "quantization_config", b"quantization_config", "replication_factor", b"replication_factor", "shard_number", b"shard_number", "sharding_method", b"sharding_method", "sparse_vectors_config", b"sparse_vectors_config", "strict_mode_config", b"strict_mode_config", "timeout", b"timeout", "vectors_config", b"vectors_config", "wal_config", b"wal_config", "write_consistency_factor", b"write_consistency_factor"]) -> None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_hnsw_config", b"_hnsw_config"]) -> typing_extensions.Literal["hnsw_config"] | None: ... @typing.overload - def WhichOneof(self, oneof_group: typing_extensions.Literal["_init_from_collection", b"_init_from_collection"]) -> typing_extensions.Literal["init_from_collection"] | None: ... - @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_on_disk_payload", b"_on_disk_payload"]) -> typing_extensions.Literal["on_disk_payload"] | None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_optimizers_config", b"_optimizers_config"]) -> typing_extensions.Literal["optimizers_config"] | None: ... @@ -1500,6 +1580,23 @@ global___CreateCollection = CreateCollection class UpdateCollection(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor + class MetadataEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + @property + def value(self) -> json_with_int_pb2.Value: ... 
+ def __init__( + self, + *, + key: builtins.str = ..., + value: json_with_int_pb2.Value | None = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value", b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ... + COLLECTION_NAME_FIELD_NUMBER: builtins.int OPTIMIZERS_CONFIG_FIELD_NUMBER: builtins.int TIMEOUT_FIELD_NUMBER: builtins.int @@ -1509,6 +1606,7 @@ class UpdateCollection(google.protobuf.message.Message): QUANTIZATION_CONFIG_FIELD_NUMBER: builtins.int SPARSE_VECTORS_CONFIG_FIELD_NUMBER: builtins.int STRICT_MODE_CONFIG_FIELD_NUMBER: builtins.int + METADATA_FIELD_NUMBER: builtins.int collection_name: builtins.str """Name of the collection""" @property @@ -1534,6 +1632,9 @@ class UpdateCollection(google.protobuf.message.Message): @property def strict_mode_config(self) -> global___StrictModeConfig: """New strict mode configuration""" + @property + def metadata(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, json_with_int_pb2.Value]: + """Arbitrary JSON-like metadata for the collection, will be merged with already stored metadata""" def __init__( self, *, @@ -1546,9 +1647,10 @@ class UpdateCollection(google.protobuf.message.Message): quantization_config: global___QuantizationConfigDiff | None = ..., sparse_vectors_config: global___SparseVectorConfig | None = ..., strict_mode_config: global___StrictModeConfig | None = ..., + metadata: collections.abc.Mapping[builtins.str, json_with_int_pb2.Value] | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["_hnsw_config", b"_hnsw_config", "_optimizers_config", b"_optimizers_config", "_params", b"_params", "_quantization_config", b"_quantization_config", "_sparse_vectors_config", b"_sparse_vectors_config", "_strict_mode_config", b"_strict_mode_config", "_timeout", b"_timeout", "_vectors_config", b"_vectors_config", "hnsw_config", b"hnsw_config", "optimizers_config", b"optimizers_config", "params", b"params", "quantization_config", b"quantization_config", "sparse_vectors_config", b"sparse_vectors_config", "strict_mode_config", b"strict_mode_config", "timeout", b"timeout", "vectors_config", b"vectors_config"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["_hnsw_config", b"_hnsw_config", "_optimizers_config", b"_optimizers_config", "_params", b"_params", "_quantization_config", b"_quantization_config", "_sparse_vectors_config", b"_sparse_vectors_config", "_strict_mode_config", b"_strict_mode_config", "_timeout", b"_timeout", "_vectors_config", b"_vectors_config", "collection_name", b"collection_name", "hnsw_config", b"hnsw_config", "optimizers_config", b"optimizers_config", "params", b"params", "quantization_config", b"quantization_config", "sparse_vectors_config", b"sparse_vectors_config", "strict_mode_config", b"strict_mode_config", "timeout", b"timeout", "vectors_config", b"vectors_config"]) -> None: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["_hnsw_config", b"_hnsw_config", "_optimizers_config", b"_optimizers_config", "_params", b"_params", "_quantization_config", b"_quantization_config", "_sparse_vectors_config", b"_sparse_vectors_config", "_strict_mode_config", b"_strict_mode_config", "_timeout", b"_timeout", "_vectors_config", b"_vectors_config", "collection_name", b"collection_name", "hnsw_config", b"hnsw_config", "metadata", b"metadata", "optimizers_config", b"optimizers_config", "params", b"params", "quantization_config", b"quantization_config", "sparse_vectors_config", b"sparse_vectors_config", "strict_mode_config", b"strict_mode_config", "timeout", b"timeout", "vectors_config", b"vectors_config"]) -> None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_hnsw_config", b"_hnsw_config"]) -> typing_extensions.Literal["hnsw_config"] | None: ... @typing.overload @@ -1705,12 +1807,30 @@ global___CollectionParamsDiff = CollectionParamsDiff class CollectionConfig(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor + class MetadataEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.str + @property + def value(self) -> json_with_int_pb2.Value: ... + def __init__( + self, + *, + key: builtins.str = ..., + value: json_with_int_pb2.Value | None = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value", b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ... + PARAMS_FIELD_NUMBER: builtins.int HNSW_CONFIG_FIELD_NUMBER: builtins.int OPTIMIZER_CONFIG_FIELD_NUMBER: builtins.int WAL_CONFIG_FIELD_NUMBER: builtins.int QUANTIZATION_CONFIG_FIELD_NUMBER: builtins.int STRICT_MODE_CONFIG_FIELD_NUMBER: builtins.int + METADATA_FIELD_NUMBER: builtins.int @property def params(self) -> global___CollectionParams: """Collection parameters""" @@ -1729,6 +1849,9 @@ class CollectionConfig(google.protobuf.message.Message): @property def strict_mode_config(self) -> global___StrictModeConfig: """Configuration of strict mode.""" + @property + def metadata(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, json_with_int_pb2.Value]: + """Arbitrary JSON metadata for the collection""" def __init__( self, *, @@ -1738,9 +1861,10 @@ class CollectionConfig(google.protobuf.message.Message): wal_config: global___WalConfigDiff | None = ..., quantization_config: global___QuantizationConfig | None = ..., strict_mode_config: global___StrictModeConfig | None = ..., + metadata: collections.abc.Mapping[builtins.str, json_with_int_pb2.Value] | None = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["_quantization_config", b"_quantization_config", "_strict_mode_config", b"_strict_mode_config", "hnsw_config", b"hnsw_config", "optimizer_config", b"optimizer_config", "params", b"params", "quantization_config", b"quantization_config", "strict_mode_config", b"strict_mode_config", "wal_config", b"wal_config"]) -> builtins.bool: ... 
- def ClearField(self, field_name: typing_extensions.Literal["_quantization_config", b"_quantization_config", "_strict_mode_config", b"_strict_mode_config", "hnsw_config", b"hnsw_config", "optimizer_config", b"optimizer_config", "params", b"params", "quantization_config", b"quantization_config", "strict_mode_config", b"strict_mode_config", "wal_config", b"wal_config"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["_quantization_config", b"_quantization_config", "_strict_mode_config", b"_strict_mode_config", "hnsw_config", b"hnsw_config", "metadata", b"metadata", "optimizer_config", b"optimizer_config", "params", b"params", "quantization_config", b"quantization_config", "strict_mode_config", b"strict_mode_config", "wal_config", b"wal_config"]) -> None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_quantization_config", b"_quantization_config"]) -> typing_extensions.Literal["quantization_config"] | None: ... @typing.overload @@ -1881,6 +2005,7 @@ class TextIndexParams(google.protobuf.message.Message): STOPWORDS_FIELD_NUMBER: builtins.int PHRASE_MATCHING_FIELD_NUMBER: builtins.int STEMMER_FIELD_NUMBER: builtins.int + ASCII_FOLDING_FIELD_NUMBER: builtins.int tokenizer: global___TokenizerType.ValueType """Tokenizer type""" lowercase: builtins.bool @@ -1899,6 +2024,8 @@ class TextIndexParams(google.protobuf.message.Message): @property def stemmer(self) -> global___StemmingAlgorithm: """Set an algorithm for stemming.""" + ascii_folding: builtins.bool + """If true, normalize tokens by folding accented characters to ASCII (e.g., "ação" -> "acao"). Default: false.""" def __init__( self, *, @@ -1910,9 +2037,12 @@ class TextIndexParams(google.protobuf.message.Message): stopwords: global___StopwordsSet | None = ..., phrase_matching: builtins.bool | None = ..., stemmer: global___StemmingAlgorithm | None = ..., + ascii_folding: builtins.bool | None = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["_lowercase", b"_lowercase", "_max_token_len", b"_max_token_len", "_min_token_len", b"_min_token_len", "_on_disk", b"_on_disk", "_phrase_matching", b"_phrase_matching", "_stemmer", b"_stemmer", "_stopwords", b"_stopwords", "lowercase", b"lowercase", "max_token_len", b"max_token_len", "min_token_len", b"min_token_len", "on_disk", b"on_disk", "phrase_matching", b"phrase_matching", "stemmer", b"stemmer", "stopwords", b"stopwords"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["_lowercase", b"_lowercase", "_max_token_len", b"_max_token_len", "_min_token_len", b"_min_token_len", "_on_disk", b"_on_disk", "_phrase_matching", b"_phrase_matching", "_stemmer", b"_stemmer", "_stopwords", b"_stopwords", "lowercase", b"lowercase", "max_token_len", b"max_token_len", "min_token_len", b"min_token_len", "on_disk", b"on_disk", "phrase_matching", b"phrase_matching", "stemmer", b"stemmer", "stopwords", b"stopwords", "tokenizer", b"tokenizer"]) -> None: ... 
+ def HasField(self, field_name: typing_extensions.Literal["_ascii_folding", b"_ascii_folding", "_lowercase", b"_lowercase", "_max_token_len", b"_max_token_len", "_min_token_len", b"_min_token_len", "_on_disk", b"_on_disk", "_phrase_matching", b"_phrase_matching", "_stemmer", b"_stemmer", "_stopwords", b"_stopwords", "ascii_folding", b"ascii_folding", "lowercase", b"lowercase", "max_token_len", b"max_token_len", "min_token_len", b"min_token_len", "on_disk", b"on_disk", "phrase_matching", b"phrase_matching", "stemmer", b"stemmer", "stopwords", b"stopwords"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["_ascii_folding", b"_ascii_folding", "_lowercase", b"_lowercase", "_max_token_len", b"_max_token_len", "_min_token_len", b"_min_token_len", "_on_disk", b"_on_disk", "_phrase_matching", b"_phrase_matching", "_stemmer", b"_stemmer", "_stopwords", b"_stopwords", "ascii_folding", b"ascii_folding", "lowercase", b"lowercase", "max_token_len", b"max_token_len", "min_token_len", b"min_token_len", "on_disk", b"on_disk", "phrase_matching", b"phrase_matching", "stemmer", b"stemmer", "stopwords", b"stopwords", "tokenizer", b"tokenizer"]) -> None: ... + @typing.overload + def WhichOneof(self, oneof_group: typing_extensions.Literal["_ascii_folding", b"_ascii_folding"]) -> typing_extensions.Literal["ascii_folding"] | None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_lowercase", b"_lowercase"]) -> typing_extensions.Literal["lowercase"] | None: ... @typing.overload @@ -2132,19 +2262,17 @@ class CollectionInfo(google.protobuf.message.Message): STATUS_FIELD_NUMBER: builtins.int OPTIMIZER_STATUS_FIELD_NUMBER: builtins.int - VECTORS_COUNT_FIELD_NUMBER: builtins.int SEGMENTS_COUNT_FIELD_NUMBER: builtins.int CONFIG_FIELD_NUMBER: builtins.int PAYLOAD_SCHEMA_FIELD_NUMBER: builtins.int POINTS_COUNT_FIELD_NUMBER: builtins.int INDEXED_VECTORS_COUNT_FIELD_NUMBER: builtins.int + WARNINGS_FIELD_NUMBER: builtins.int status: global___CollectionStatus.ValueType """operating condition of the collection""" @property def optimizer_status(self) -> global___OptimizerStatus: """status of collection optimizers""" - vectors_count: builtins.int - """Approximate number of vectors in the collection""" segments_count: builtins.int """Number of independent segments""" @property @@ -2157,26 +2285,27 @@ class CollectionInfo(google.protobuf.message.Message): """Approximate number of points in the collection""" indexed_vectors_count: builtins.int """Approximate number of indexed vectors in the collection.""" + @property + def warnings(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___CollectionWarning]: + """Warnings related to the collection""" def __init__( self, *, status: global___CollectionStatus.ValueType = ..., optimizer_status: global___OptimizerStatus | None = ..., - vectors_count: builtins.int | None = ..., segments_count: builtins.int = ..., config: global___CollectionConfig | None = ..., payload_schema: collections.abc.Mapping[builtins.str, global___PayloadSchemaInfo] | None = ..., points_count: builtins.int | None = ..., indexed_vectors_count: builtins.int | None = ..., + warnings: collections.abc.Iterable[global___CollectionWarning] | None = ..., ) -> None: ... 
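
# A minimal sketch of consuming the new repeated warnings field on
# CollectionInfo; with vectors_count gone from this message, callers should
# rely on points_count / indexed_vectors_count instead. The message text below
# is illustrative, standing in for a real Get response:
from qdrant_client.grpc import collections_pb2

info = collections_pb2.CollectionInfo(
    segments_count=1,
    warnings=[collections_pb2.CollectionWarning(message="example warning")],
)
for warning in info.warnings:
    print(warning.message)
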
- def HasField(self, field_name: typing_extensions.Literal["_indexed_vectors_count", b"_indexed_vectors_count", "_points_count", b"_points_count", "_vectors_count", b"_vectors_count", "config", b"config", "indexed_vectors_count", b"indexed_vectors_count", "optimizer_status", b"optimizer_status", "points_count", b"points_count", "vectors_count", b"vectors_count"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["_indexed_vectors_count", b"_indexed_vectors_count", "_points_count", b"_points_count", "_vectors_count", b"_vectors_count", "config", b"config", "indexed_vectors_count", b"indexed_vectors_count", "optimizer_status", b"optimizer_status", "payload_schema", b"payload_schema", "points_count", b"points_count", "segments_count", b"segments_count", "status", b"status", "vectors_count", b"vectors_count"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["_indexed_vectors_count", b"_indexed_vectors_count", "_points_count", b"_points_count", "config", b"config", "indexed_vectors_count", b"indexed_vectors_count", "optimizer_status", b"optimizer_status", "points_count", b"points_count"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["_indexed_vectors_count", b"_indexed_vectors_count", "_points_count", b"_points_count", "config", b"config", "indexed_vectors_count", b"indexed_vectors_count", "optimizer_status", b"optimizer_status", "payload_schema", b"payload_schema", "points_count", b"points_count", "segments_count", b"segments_count", "status", b"status", "warnings", b"warnings"]) -> None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_indexed_vectors_count", b"_indexed_vectors_count"]) -> typing_extensions.Literal["indexed_vectors_count"] | None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_points_count", b"_points_count"]) -> typing_extensions.Literal["points_count"] | None: ... - @typing.overload - def WhichOneof(self, oneof_group: typing_extensions.Literal["_vectors_count", b"_vectors_count"]) -> typing_extensions.Literal["vectors_count"] | None: ... global___CollectionInfo = CollectionInfo @@ -2674,6 +2803,7 @@ class CreateShardKey(google.protobuf.message.Message): SHARDS_NUMBER_FIELD_NUMBER: builtins.int REPLICATION_FACTOR_FIELD_NUMBER: builtins.int PLACEMENT_FIELD_NUMBER: builtins.int + INITIAL_STATE_FIELD_NUMBER: builtins.int @property def shard_key(self) -> global___ShardKey: """User-defined shard key""" @@ -2684,6 +2814,8 @@ class CreateShardKey(google.protobuf.message.Message): @property def placement(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: """List of peer ids, allowed to create shards. If empty - all peers are allowed""" + initial_state: global___ReplicaState.ValueType + """Initial state of created replicas. Warning: use with care.""" def __init__( self, *, @@ -2691,9 +2823,12 @@ class CreateShardKey(google.protobuf.message.Message): shards_number: builtins.int | None = ..., replication_factor: builtins.int | None = ..., placement: collections.abc.Iterable[builtins.int] | None = ..., + initial_state: global___ReplicaState.ValueType | None = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["_replication_factor", b"_replication_factor", "_shards_number", b"_shards_number", "replication_factor", b"replication_factor", "shard_key", b"shard_key", "shards_number", b"shards_number"]) -> builtins.bool: ... 
- def ClearField(self, field_name: typing_extensions.Literal["_replication_factor", b"_replication_factor", "_shards_number", b"_shards_number", "placement", b"placement", "replication_factor", b"replication_factor", "shard_key", b"shard_key", "shards_number", b"shards_number"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["_initial_state", b"_initial_state", "_replication_factor", b"_replication_factor", "_shards_number", b"_shards_number", "initial_state", b"initial_state", "replication_factor", b"replication_factor", "shard_key", b"shard_key", "shards_number", b"shards_number"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["_initial_state", b"_initial_state", "_replication_factor", b"_replication_factor", "_shards_number", b"_shards_number", "initial_state", b"initial_state", "placement", b"placement", "replication_factor", b"replication_factor", "shard_key", b"shard_key", "shards_number", b"shards_number"]) -> None: ... + @typing.overload + def WhichOneof(self, oneof_group: typing_extensions.Literal["_initial_state", b"_initial_state"]) -> typing_extensions.Literal["initial_state"] | None: ... @typing.overload def WhichOneof(self, oneof_group: typing_extensions.Literal["_replication_factor", b"_replication_factor"]) -> typing_extensions.Literal["replication_factor"] | None: ... @typing.overload diff --git a/qdrant_client/grpc/collections_service_pb2.py b/qdrant_client/grpc/collections_service_pb2.py index d54f141e..e5bdb5f5 100644 --- a/qdrant_client/grpc/collections_service_pb2.py +++ b/qdrant_client/grpc/collections_service_pb2.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: collections_service.proto +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -17,13 +17,12 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19\x63ollections_service.proto\x12\x06qdrant\x1a\x11\x63ollections.proto2\xe2\x08\n\x0b\x43ollections\x12L\n\x03Get\x12 .qdrant.GetCollectionInfoRequest\x1a!.qdrant.GetCollectionInfoResponse\"\x00\x12I\n\x04List\x12\x1e.qdrant.ListCollectionsRequest\x1a\x1f.qdrant.ListCollectionsResponse\"\x00\x12I\n\x06\x43reate\x12\x18.qdrant.CreateCollection\x1a#.qdrant.CollectionOperationResponse\"\x00\x12I\n\x06Update\x12\x18.qdrant.UpdateCollection\x1a#.qdrant.CollectionOperationResponse\"\x00\x12I\n\x06\x44\x65lete\x12\x18.qdrant.DeleteCollection\x1a#.qdrant.CollectionOperationResponse\"\x00\x12M\n\rUpdateAliases\x12\x15.qdrant.ChangeAliases\x1a#.qdrant.CollectionOperationResponse\"\x00\x12\\\n\x15ListCollectionAliases\x12$.qdrant.ListCollectionAliasesRequest\x1a\x1b.qdrant.ListAliasesResponse\"\x00\x12H\n\x0bListAliases\x12\x1a.qdrant.ListAliasesRequest\x1a\x1b.qdrant.ListAliasesResponse\"\x00\x12\x66\n\x15\x43ollectionClusterInfo\x12$.qdrant.CollectionClusterInfoRequest\x1a%.qdrant.CollectionClusterInfoResponse\"\x00\x12W\n\x10\x43ollectionExists\x12\x1f.qdrant.CollectionExistsRequest\x1a 
.qdrant.CollectionExistsResponse\"\x00\x12{\n\x1cUpdateCollectionClusterSetup\x12+.qdrant.UpdateCollectionClusterSetupRequest\x1a,.qdrant.UpdateCollectionClusterSetupResponse\"\x00\x12Q\n\x0e\x43reateShardKey\x12\x1d.qdrant.CreateShardKeyRequest\x1a\x1e.qdrant.CreateShardKeyResponse\"\x00\x12Q\n\x0e\x44\x65leteShardKey\x12\x1d.qdrant.DeleteShardKeyRequest\x1a\x1e.qdrant.DeleteShardKeyResponse\"\x00\x42\x15\xaa\x02\x12Qdrant.Client.Grpcb\x06proto3') - - -_COLLECTIONS = DESCRIPTOR.services_by_name['Collections'] +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'collections_service_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'\252\002\022Qdrant.Client.Grpc' - _COLLECTIONS._serialized_start=57 - _COLLECTIONS._serialized_end=1179 + _globals['DESCRIPTOR']._options = None + _globals['DESCRIPTOR']._serialized_options = b'\252\002\022Qdrant.Client.Grpc' + _globals['_COLLECTIONS']._serialized_start=57 + _globals['_COLLECTIONS']._serialized_end=1179 # @@protoc_insertion_point(module_scope) diff --git a/qdrant_client/grpc/json_with_int_pb2.py b/qdrant_client/grpc/json_with_int_pb2.py index d7d5f5b6..d9775578 100644 --- a/qdrant_client/grpc/json_with_int_pb2.py +++ b/qdrant_client/grpc/json_with_int_pb2.py @@ -1,13 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: json_with_int.proto +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" -from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -17,58 +16,22 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x13json_with_int.proto\x12\x06qdrant\"r\n\x06Struct\x12*\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x1a.qdrant.Struct.FieldsEntry\x1a<\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\"\xe8\x01\n\x05Value\x12\'\n\nnull_value\x18\x01 \x01(\x0e\x32\x11.qdrant.NullValueH\x00\x12\x16\n\x0c\x64ouble_value\x18\x02 \x01(\x01H\x00\x12\x17\n\rinteger_value\x18\x03 \x01(\x03H\x00\x12\x16\n\x0cstring_value\x18\x04 \x01(\tH\x00\x12\x14\n\nbool_value\x18\x05 \x01(\x08H\x00\x12&\n\x0cstruct_value\x18\x06 \x01(\x0b\x32\x0e.qdrant.StructH\x00\x12\'\n\nlist_value\x18\x07 \x01(\x0b\x32\x11.qdrant.ListValueH\x00\x42\x06\n\x04kind\"*\n\tListValue\x12\x1d\n\x06values\x18\x01 \x03(\x0b\x32\r.qdrant.Value*\x1b\n\tNullValue\x12\x0e\n\nNULL_VALUE\x10\x00\x42\x15\xaa\x02\x12Qdrant.Client.Grpcb\x06proto3') -_NULLVALUE = DESCRIPTOR.enum_types_by_name['NullValue'] -NullValue = enum_type_wrapper.EnumTypeWrapper(_NULLVALUE) -NULL_VALUE = 0 - - -_STRUCT = DESCRIPTOR.message_types_by_name['Struct'] -_STRUCT_FIELDSENTRY = _STRUCT.nested_types_by_name['FieldsEntry'] -_VALUE = DESCRIPTOR.message_types_by_name['Value'] -_LISTVALUE = DESCRIPTOR.message_types_by_name['ListValue'] -Struct = _reflection.GeneratedProtocolMessageType('Struct', (_message.Message,), { - - 'FieldsEntry' : _reflection.GeneratedProtocolMessageType('FieldsEntry', (_message.Message,), { 
-    'DESCRIPTOR' : _STRUCT_FIELDSENTRY,
-    '__module__' : 'json_with_int_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.Struct.FieldsEntry)
-    })
-  ,
-  'DESCRIPTOR' : _STRUCT,
-  '__module__' : 'json_with_int_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Struct)
-  })
-_sym_db.RegisterMessage(Struct)
-_sym_db.RegisterMessage(Struct.FieldsEntry)
-
-Value = _reflection.GeneratedProtocolMessageType('Value', (_message.Message,), {
-  'DESCRIPTOR' : _VALUE,
-  '__module__' : 'json_with_int_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Value)
-  })
-_sym_db.RegisterMessage(Value)
-
-ListValue = _reflection.GeneratedProtocolMessageType('ListValue', (_message.Message,), {
-  'DESCRIPTOR' : _LISTVALUE,
-  '__module__' : 'json_with_int_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ListValue)
-  })
-_sym_db.RegisterMessage(ListValue)
-
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'json_with_int_pb2', _globals)
 if _descriptor._USE_C_DESCRIPTORS == False:
-
-  DESCRIPTOR._options = None
-  DESCRIPTOR._serialized_options = b'\252\002\022Qdrant.Client.Grpc'
-  _STRUCT_FIELDSENTRY._options = None
-  _STRUCT_FIELDSENTRY._serialized_options = b'8\001'
-  _NULLVALUE._serialized_start=426
-  _NULLVALUE._serialized_end=453
-  _STRUCT._serialized_start=31
-  _STRUCT._serialized_end=145
-  _STRUCT_FIELDSENTRY._serialized_start=85
-  _STRUCT_FIELDSENTRY._serialized_end=145
-  _VALUE._serialized_start=148
-  _VALUE._serialized_end=380
-  _LISTVALUE._serialized_start=382
-  _LISTVALUE._serialized_end=424
+  _globals['DESCRIPTOR']._options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\252\002\022Qdrant.Client.Grpc'
+  _globals['_STRUCT_FIELDSENTRY']._options = None
+  _globals['_STRUCT_FIELDSENTRY']._serialized_options = b'8\001'
+  _globals['_NULLVALUE']._serialized_start=426
+  _globals['_NULLVALUE']._serialized_end=453
+  _globals['_STRUCT']._serialized_start=31
+  _globals['_STRUCT']._serialized_end=145
+  _globals['_STRUCT_FIELDSENTRY']._serialized_start=85
+  _globals['_STRUCT_FIELDSENTRY']._serialized_end=145
+  _globals['_VALUE']._serialized_start=148
+  _globals['_VALUE']._serialized_end=380
+  _globals['_LISTVALUE']._serialized_start=382
+  _globals['_LISTVALUE']._serialized_end=424
 # @@protoc_insertion_point(module_scope)
diff --git a/qdrant_client/grpc/points_pb2.py b/qdrant_client/grpc/points_pb2.py
index 33ef2d94..6836be79 100644
--- a/qdrant_client/grpc/points_pb2.py
+++ b/qdrant_client/grpc/points_pb2.py
@@ -1,13 +1,12 @@
 # -*- coding: utf-8 -*-
 # Generated by the protocol buffer compiler. DO NOT EDIT!
 # source: points.proto
+# Protobuf Python Version: 4.25.1
 """Generated protocol buffer code."""
-from google.protobuf.internal import enum_type_wrapper
 from google.protobuf import descriptor as _descriptor
 from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
 from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
 # @@protoc_insertion_point(imports)

 _sym_db = _symbol_database.Default()

@@ -18,1691 +17,388 @@ from .
import json_with_int_pb2 as json__with__int__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cpoints.proto\x12\x06qdrant\x1a\x11\x63ollections.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x13json_with_int.proto\"8\n\rWriteOrdering\x12\'\n\x04type\x18\x01 \x01(\x0e\x32\x19.qdrant.WriteOrderingType\"Y\n\x0fReadConsistency\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1b.qdrant.ReadConsistencyTypeH\x00\x12\x10\n\x06\x66\x61\x63tor\x18\x02 \x01(\x04H\x00\x42\x07\n\x05value\"<\n\x07PointId\x12\r\n\x03num\x18\x01 \x01(\x04H\x00\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x42\x12\n\x10point_id_options\"\x1d\n\rSparseIndices\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\r\"\x96\x01\n\x08\x44ocument\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\r\n\x05model\x18\x03 \x01(\t\x12.\n\x07options\x18\x04 \x03(\x0b\x32\x1d.qdrant.Document.OptionsEntry\x1a=\n\x0cOptionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\"\xa0\x01\n\x05Image\x12\x1c\n\x05image\x18\x01 \x01(\x0b\x32\r.qdrant.Value\x12\r\n\x05model\x18\x02 \x01(\t\x12+\n\x07options\x18\x03 \x03(\x0b\x32\x1a.qdrant.Image.OptionsEntry\x1a=\n\x0cOptionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\"\xb5\x01\n\x0fInferenceObject\x12\x1d\n\x06object\x18\x01 \x01(\x0b\x32\r.qdrant.Value\x12\r\n\x05model\x18\x02 \x01(\t\x12\x35\n\x07options\x18\x03 \x03(\x0b\x32$.qdrant.InferenceObject.OptionsEntry\x1a=\n\x0cOptionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\"\xf7\x02\n\x06Vector\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x02\x12+\n\x07indices\x18\x02 \x01(\x0b\x32\x15.qdrant.SparseIndicesH\x01\x88\x01\x01\x12\x1a\n\rvectors_count\x18\x03 \x01(\rH\x02\x88\x01\x01\x12$\n\x05\x64\x65nse\x18\x65 \x01(\x0b\x32\x13.qdrant.DenseVectorH\x00\x12&\n\x06sparse\x18\x66 \x01(\x0b\x32\x14.qdrant.SparseVectorH\x00\x12/\n\x0bmulti_dense\x18g \x01(\x0b\x32\x18.qdrant.MultiDenseVectorH\x00\x12$\n\x08\x64ocument\x18h \x01(\x0b\x32\x10.qdrant.DocumentH\x00\x12\x1e\n\x05image\x18i \x01(\x0b\x32\r.qdrant.ImageH\x00\x12)\n\x06object\x18j \x01(\x0b\x32\x17.qdrant.InferenceObjectH\x00\x42\x08\n\x06vectorB\n\n\x08_indicesB\x10\n\x0e_vectors_count\"\x8c\x02\n\x0cVectorOutput\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x02\x12+\n\x07indices\x18\x02 \x01(\x0b\x32\x15.qdrant.SparseIndicesH\x01\x88\x01\x01\x12\x1a\n\rvectors_count\x18\x03 \x01(\rH\x02\x88\x01\x01\x12$\n\x05\x64\x65nse\x18\x65 \x01(\x0b\x32\x13.qdrant.DenseVectorH\x00\x12&\n\x06sparse\x18\x66 \x01(\x0b\x32\x14.qdrant.SparseVectorH\x00\x12/\n\x0bmulti_dense\x18g \x01(\x0b\x32\x18.qdrant.MultiDenseVectorH\x00\x42\x08\n\x06vectorB\n\n\x08_indicesB\x10\n\x0e_vectors_count\"\x1b\n\x0b\x44\x65nseVector\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x02\"/\n\x0cSparseVector\x12\x0e\n\x06values\x18\x01 \x03(\x02\x12\x0f\n\x07indices\x18\x02 \x03(\r\"8\n\x10MultiDenseVector\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.qdrant.DenseVector\"\xa7\x02\n\x0bVectorInput\x12\x1d\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointIdH\x00\x12$\n\x05\x64\x65nse\x18\x02 \x01(\x0b\x32\x13.qdrant.DenseVectorH\x00\x12&\n\x06sparse\x18\x03 \x01(\x0b\x32\x14.qdrant.SparseVectorH\x00\x12/\n\x0bmulti_dense\x18\x04 \x01(\x0b\x32\x18.qdrant.MultiDenseVectorH\x00\x12$\n\x08\x64ocument\x18\x05 \x01(\x0b\x32\x10.qdrant.DocumentH\x00\x12\x1e\n\x05image\x18\x06 \x01(\x0b\x32\r.qdrant.ImageH\x00\x12)\n\x06object\x18\x07 
\x01(\x0b\x32\x17.qdrant.InferenceObjectH\x00\x42\t\n\x07variant\"8\n\x10ShardKeySelector\x12$\n\nshard_keys\x18\x01 \x03(\x0b\x32\x10.qdrant.ShardKey\"\xf5\x01\n\x0cUpsertPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12#\n\x06points\x18\x03 \x03(\x0b\x32\x13.qdrant.PointStruct\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x05 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_orderingB\x15\n\x13_shard_key_selector\"\xf8\x01\n\x0c\x44\x65letePoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12&\n\x06points\x18\x03 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x05 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_orderingB\x15\n\x13_shard_key_selector\"\x85\x03\n\tGetPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x1c\n\x03ids\x18\x02 \x03(\x0b\x32\x0f.qdrant.PointId\x12\x31\n\x0cwith_payload\x18\x04 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12\x36\n\x0cwith_vectors\x18\x05 \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x00\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x06 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x07 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x12\x14\n\x07timeout\x18\x08 \x01(\x04H\x03\x88\x01\x01\x42\x0f\n\r_with_vectorsB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selectorB\n\n\x08_timeoutJ\x04\x08\x03\x10\x04\"\xfc\x01\n\x12UpdatePointVectors\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12$\n\x06points\x18\x03 \x03(\x0b\x32\x14.qdrant.PointVectors\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x05 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_orderingB\x15\n\x13_shard_key_selector\"M\n\x0cPointVectors\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointId\x12 \n\x07vectors\x18\x02 \x01(\x0b\x32\x0f.qdrant.Vectors\"\xb1\x02\n\x12\x44\x65letePointVectors\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12/\n\x0fpoints_selector\x18\x03 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12(\n\x07vectors\x18\x04 \x01(\x0b\x32\x17.qdrant.VectorsSelector\x12,\n\x08ordering\x18\x05 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x06 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_orderingB\x15\n\x13_shard_key_selector\"\xb5\x03\n\x10SetPayloadPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x36\n\x07payload\x18\x03 \x03(\x0b\x32%.qdrant.SetPayloadPoints.PayloadEntry\x12\x34\n\x0fpoints_selector\x18\x05 \x01(\x0b\x32\x16.qdrant.PointsSelectorH\x01\x88\x01\x01\x12,\n\x08ordering\x18\x06 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x02\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x07 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x03\x88\x01\x01\x12\x10\n\x03key\x18\x08 \x01(\tH\x04\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 
\x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\x07\n\x05_waitB\x12\n\x10_points_selectorB\x0b\n\t_orderingB\x15\n\x13_shard_key_selectorB\x06\n\x04_keyJ\x04\x08\x04\x10\x05\"\xb5\x02\n\x13\x44\x65letePayloadPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x0c\n\x04keys\x18\x03 \x03(\t\x12\x34\n\x0fpoints_selector\x18\x05 \x01(\x0b\x32\x16.qdrant.PointsSelectorH\x01\x88\x01\x01\x12,\n\x08ordering\x18\x06 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x02\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x07 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x03\x88\x01\x01\x42\x07\n\x05_waitB\x12\n\x10_points_selectorB\x0b\n\t_orderingB\x15\n\x13_shard_key_selectorJ\x04\x08\x04\x10\x05\"\xfe\x01\n\x12\x43learPayloadPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12&\n\x06points\x18\x03 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x05 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_orderingB\x15\n\x13_shard_key_selector\"\xaf\x02\n\x1a\x43reateFieldIndexCollection\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x12\n\nfield_name\x18\x03 \x01(\t\x12*\n\nfield_type\x18\x04 \x01(\x0e\x32\x11.qdrant.FieldTypeH\x01\x88\x01\x01\x12;\n\x12\x66ield_index_params\x18\x05 \x01(\x0b\x32\x1a.qdrant.PayloadIndexParamsH\x02\x88\x01\x01\x12,\n\x08ordering\x18\x06 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x03\x88\x01\x01\x42\x07\n\x05_waitB\r\n\x0b_field_typeB\x15\n\x13_field_index_paramsB\x0b\n\t_ordering\"\xa0\x01\n\x1a\x44\x65leteFieldIndexCollection\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x12\n\nfield_name\x18\x03 \x01(\t\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_ordering\"(\n\x16PayloadIncludeSelector\x12\x0e\n\x06\x66ields\x18\x01 \x03(\t\"(\n\x16PayloadExcludeSelector\x12\x0e\n\x06\x66ields\x18\x01 \x03(\t\"\xa1\x01\n\x13WithPayloadSelector\x12\x10\n\x06\x65nable\x18\x01 \x01(\x08H\x00\x12\x31\n\x07include\x18\x02 \x01(\x0b\x32\x1e.qdrant.PayloadIncludeSelectorH\x00\x12\x31\n\x07\x65xclude\x18\x03 \x01(\x0b\x32\x1e.qdrant.PayloadExcludeSelectorH\x00\x42\x12\n\x10selector_options\"\x82\x01\n\x0cNamedVectors\x12\x32\n\x07vectors\x18\x01 \x03(\x0b\x32!.qdrant.NamedVectors.VectorsEntry\x1a>\n\x0cVectorsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1d\n\x05value\x18\x02 \x01(\x0b\x32\x0e.qdrant.Vector:\x02\x38\x01\"\x94\x01\n\x12NamedVectorsOutput\x12\x38\n\x07vectors\x18\x01 \x03(\x0b\x32\'.qdrant.NamedVectorsOutput.VectorsEntry\x1a\x44\n\x0cVectorsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.qdrant.VectorOutput:\x02\x38\x01\"g\n\x07Vectors\x12 \n\x06vector\x18\x01 \x01(\x0b\x32\x0e.qdrant.VectorH\x00\x12\'\n\x07vectors\x18\x02 \x01(\x0b\x32\x14.qdrant.NamedVectorsH\x00\x42\x11\n\x0fvectors_options\"y\n\rVectorsOutput\x12&\n\x06vector\x18\x01 \x01(\x0b\x32\x14.qdrant.VectorOutputH\x00\x12-\n\x07vectors\x18\x02 \x01(\x0b\x32\x1a.qdrant.NamedVectorsOutputH\x00\x42\x11\n\x0fvectors_options\" \n\x0fVectorsSelector\x12\r\n\x05names\x18\x01 \x03(\t\"g\n\x13WithVectorsSelector\x12\x10\n\x06\x65nable\x18\x01 \x01(\x08H\x00\x12*\n\x07include\x18\x02 
\x01(\x0b\x32\x17.qdrant.VectorsSelectorH\x00\x42\x12\n\x10selector_options\"\x88\x01\n\x18QuantizationSearchParams\x12\x13\n\x06ignore\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x14\n\x07rescore\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x19\n\x0coversampling\x18\x03 \x01(\x01H\x02\x88\x01\x01\x42\t\n\x07_ignoreB\n\n\x08_rescoreB\x0f\n\r_oversampling\"\xc8\x01\n\x0cSearchParams\x12\x14\n\x07hnsw_ef\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x12\n\x05\x65xact\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12;\n\x0cquantization\x18\x03 \x01(\x0b\x32 .qdrant.QuantizationSearchParamsH\x02\x88\x01\x01\x12\x19\n\x0cindexed_only\x18\x04 \x01(\x08H\x03\x88\x01\x01\x42\n\n\x08_hnsw_efB\x08\n\x06_exactB\x0f\n\r_quantizationB\x0f\n\r_indexed_only\"\x92\x05\n\x0cSearchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x0e\n\x06vector\x18\x02 \x03(\x02\x12\x1e\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x0e.qdrant.Filter\x12\r\n\x05limit\x18\x04 \x01(\x04\x12\x31\n\x0cwith_payload\x18\x06 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12$\n\x06params\x18\x07 \x01(\x0b\x32\x14.qdrant.SearchParams\x12\x1c\n\x0fscore_threshold\x18\x08 \x01(\x02H\x00\x88\x01\x01\x12\x13\n\x06offset\x18\t \x01(\x04H\x01\x88\x01\x01\x12\x18\n\x0bvector_name\x18\n \x01(\tH\x02\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\x0b \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x03\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x0c \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x04\x88\x01\x01\x12\x14\n\x07timeout\x18\r \x01(\x04H\x05\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x0e \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x06\x88\x01\x01\x12\x32\n\x0esparse_indices\x18\x0f \x01(\x0b\x32\x15.qdrant.SparseIndicesH\x07\x88\x01\x01\x42\x12\n\x10_score_thresholdB\t\n\x07_offsetB\x0e\n\x0c_vector_nameB\x0f\n\r_with_vectorsB\x13\n\x11_read_consistencyB\n\n\x08_timeoutB\x15\n\x13_shard_key_selectorB\x11\n\x0f_sparse_indicesJ\x04\x08\x05\x10\x06\"\xc8\x01\n\x11SearchBatchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12+\n\rsearch_points\x18\x02 \x03(\x0b\x32\x14.qdrant.SearchPoints\x12\x36\n\x10read_consistency\x18\x03 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x00\x88\x01\x01\x12\x14\n\x07timeout\x18\x04 \x01(\x04H\x01\x88\x01\x01\x42\x13\n\x11_read_consistencyB\n\n\x08_timeout\"\xb2\x01\n\nWithLookup\x12\x12\n\ncollection\x18\x01 \x01(\t\x12\x36\n\x0cwith_payload\x18\x02 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelectorH\x00\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\x03 \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x01\x88\x01\x01\x42\x0f\n\r_with_payloadB\x0f\n\r_with_vectors\"\xd5\x05\n\x11SearchPointGroups\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x0e\n\x06vector\x18\x02 \x03(\x02\x12\x1e\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x0e.qdrant.Filter\x12\r\n\x05limit\x18\x04 \x01(\r\x12\x31\n\x0cwith_payload\x18\x05 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12$\n\x06params\x18\x06 \x01(\x0b\x32\x14.qdrant.SearchParams\x12\x1c\n\x0fscore_threshold\x18\x07 \x01(\x02H\x00\x88\x01\x01\x12\x18\n\x0bvector_name\x18\x08 \x01(\tH\x01\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\t \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x02\x88\x01\x01\x12\x10\n\x08group_by\x18\n \x01(\t\x12\x12\n\ngroup_size\x18\x0b \x01(\r\x12\x36\n\x10read_consistency\x18\x0c \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x03\x88\x01\x01\x12,\n\x0bwith_lookup\x18\r \x01(\x0b\x32\x12.qdrant.WithLookupH\x04\x88\x01\x01\x12\x14\n\x07timeout\x18\x0e \x01(\x04H\x05\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x0f 
\x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x06\x88\x01\x01\x12\x32\n\x0esparse_indices\x18\x10 \x01(\x0b\x32\x15.qdrant.SparseIndicesH\x07\x88\x01\x01\x42\x12\n\x10_score_thresholdB\x0e\n\x0c_vector_nameB\x0f\n\r_with_vectorsB\x13\n\x11_read_consistencyB\x0e\n\x0c_with_lookupB\n\n\x08_timeoutB\x15\n\x13_shard_key_selectorB\x11\n\x0f_sparse_indices\"}\n\tStartFrom\x12\x0f\n\x05\x66loat\x18\x01 \x01(\x01H\x00\x12\x11\n\x07integer\x18\x02 \x01(\x03H\x00\x12/\n\ttimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x12\n\x08\x64\x61tetime\x18\x04 \x01(\tH\x00\x42\x07\n\x05value\"\x8a\x01\n\x07OrderBy\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\tdirection\x18\x02 \x01(\x0e\x32\x11.qdrant.DirectionH\x00\x88\x01\x01\x12*\n\nstart_from\x18\x03 \x01(\x0b\x32\x11.qdrant.StartFromH\x01\x88\x01\x01\x42\x0c\n\n_directionB\r\n\x0b_start_from\"\x8e\x04\n\x0cScrollPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x1e\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0e.qdrant.Filter\x12$\n\x06offset\x18\x03 \x01(\x0b\x32\x0f.qdrant.PointIdH\x00\x88\x01\x01\x12\x12\n\x05limit\x18\x04 \x01(\rH\x01\x88\x01\x01\x12\x31\n\x0cwith_payload\x18\x06 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12\x36\n\x0cwith_vectors\x18\x07 \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x02\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x08 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x03\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\t \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x04\x88\x01\x01\x12&\n\x08order_by\x18\n \x01(\x0b\x32\x0f.qdrant.OrderByH\x05\x88\x01\x01\x12\x14\n\x07timeout\x18\x0b \x01(\x04H\x06\x88\x01\x01\x42\t\n\x07_offsetB\x08\n\x06_limitB\x0f\n\r_with_vectorsB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selectorB\x0b\n\t_order_byB\n\n\x08_timeoutJ\x04\x08\x05\x10\x06\"\xa5\x01\n\x0eLookupLocation\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x18\n\x0bvector_name\x18\x02 \x01(\tH\x00\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x03 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x01\x88\x01\x01\x42\x0e\n\x0c_vector_nameB\x15\n\x13_shard_key_selector\"\xcd\x06\n\x0fRecommendPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12!\n\x08positive\x18\x02 \x03(\x0b\x32\x0f.qdrant.PointId\x12!\n\x08negative\x18\x03 \x03(\x0b\x32\x0f.qdrant.PointId\x12\x1e\n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x0e.qdrant.Filter\x12\r\n\x05limit\x18\x05 \x01(\x04\x12\x31\n\x0cwith_payload\x18\x07 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12$\n\x06params\x18\x08 \x01(\x0b\x32\x14.qdrant.SearchParams\x12\x1c\n\x0fscore_threshold\x18\t \x01(\x02H\x00\x88\x01\x01\x12\x13\n\x06offset\x18\n \x01(\x04H\x01\x88\x01\x01\x12\x12\n\x05using\x18\x0b \x01(\tH\x02\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\x0c \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x03\x88\x01\x01\x12\x30\n\x0blookup_from\x18\r \x01(\x0b\x32\x16.qdrant.LookupLocationH\x04\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x0e \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x05\x88\x01\x01\x12\x30\n\x08strategy\x18\x10 \x01(\x0e\x32\x19.qdrant.RecommendStrategyH\x06\x88\x01\x01\x12(\n\x10positive_vectors\x18\x11 \x03(\x0b\x32\x0e.qdrant.Vector\x12(\n\x10negative_vectors\x18\x12 \x03(\x0b\x32\x0e.qdrant.Vector\x12\x14\n\x07timeout\x18\x13 \x01(\x04H\x07\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x14 
\x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x08\x88\x01\x01\x42\x12\n\x10_score_thresholdB\t\n\x07_offsetB\x08\n\x06_usingB\x0f\n\r_with_vectorsB\x0e\n\x0c_lookup_fromB\x13\n\x11_read_consistencyB\x0b\n\t_strategyB\n\n\x08_timeoutB\x15\n\x13_shard_key_selectorJ\x04\x08\x06\x10\x07\"\xd1\x01\n\x14RecommendBatchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x31\n\x10recommend_points\x18\x02 \x03(\x0b\x32\x17.qdrant.RecommendPoints\x12\x36\n\x10read_consistency\x18\x03 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x00\x88\x01\x01\x12\x14\n\x07timeout\x18\x04 \x01(\x04H\x01\x88\x01\x01\x42\x13\n\x11_read_consistencyB\n\n\x08_timeout\"\x90\x07\n\x14RecommendPointGroups\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12!\n\x08positive\x18\x02 \x03(\x0b\x32\x0f.qdrant.PointId\x12!\n\x08negative\x18\x03 \x03(\x0b\x32\x0f.qdrant.PointId\x12\x1e\n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x0e.qdrant.Filter\x12\r\n\x05limit\x18\x05 \x01(\r\x12\x31\n\x0cwith_payload\x18\x06 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12$\n\x06params\x18\x07 \x01(\x0b\x32\x14.qdrant.SearchParams\x12\x1c\n\x0fscore_threshold\x18\x08 \x01(\x02H\x00\x88\x01\x01\x12\x12\n\x05using\x18\t \x01(\tH\x01\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\n \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x02\x88\x01\x01\x12\x30\n\x0blookup_from\x18\x0b \x01(\x0b\x32\x16.qdrant.LookupLocationH\x03\x88\x01\x01\x12\x10\n\x08group_by\x18\x0c \x01(\t\x12\x12\n\ngroup_size\x18\r \x01(\r\x12\x36\n\x10read_consistency\x18\x0e \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x04\x88\x01\x01\x12,\n\x0bwith_lookup\x18\x0f \x01(\x0b\x32\x12.qdrant.WithLookupH\x05\x88\x01\x01\x12\x30\n\x08strategy\x18\x11 \x01(\x0e\x32\x19.qdrant.RecommendStrategyH\x06\x88\x01\x01\x12(\n\x10positive_vectors\x18\x12 \x03(\x0b\x32\x0e.qdrant.Vector\x12(\n\x10negative_vectors\x18\x13 \x03(\x0b\x32\x0e.qdrant.Vector\x12\x14\n\x07timeout\x18\x14 \x01(\x04H\x07\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x15 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x08\x88\x01\x01\x42\x12\n\x10_score_thresholdB\x08\n\x06_usingB\x0f\n\r_with_vectorsB\x0e\n\x0c_lookup_fromB\x13\n\x11_read_consistencyB\x0e\n\x0c_with_lookupB\x0b\n\t_strategyB\n\n\x08_timeoutB\x15\n\x13_shard_key_selector\"A\n\x0cTargetVector\x12\'\n\x06single\x18\x01 \x01(\x0b\x32\x15.qdrant.VectorExampleH\x00\x42\x08\n\x06target\"[\n\rVectorExample\x12\x1d\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointIdH\x00\x12 \n\x06vector\x18\x02 \x01(\x0b\x32\x0e.qdrant.VectorH\x00\x42\t\n\x07\x65xample\"f\n\x12\x43ontextExamplePair\x12\'\n\x08positive\x18\x01 \x01(\x0b\x32\x15.qdrant.VectorExample\x12\'\n\x08negative\x18\x02 \x01(\x0b\x32\x15.qdrant.VectorExample\"\x8e\x05\n\x0e\x44iscoverPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12$\n\x06target\x18\x02 \x01(\x0b\x32\x14.qdrant.TargetVector\x12+\n\x07\x63ontext\x18\x03 \x03(\x0b\x32\x1a.qdrant.ContextExamplePair\x12\x1e\n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x0e.qdrant.Filter\x12\r\n\x05limit\x18\x05 \x01(\x04\x12\x31\n\x0cwith_payload\x18\x06 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12$\n\x06params\x18\x07 \x01(\x0b\x32\x14.qdrant.SearchParams\x12\x13\n\x06offset\x18\x08 \x01(\x04H\x00\x88\x01\x01\x12\x12\n\x05using\x18\t \x01(\tH\x01\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\n \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x02\x88\x01\x01\x12\x30\n\x0blookup_from\x18\x0b \x01(\x0b\x32\x16.qdrant.LookupLocationH\x03\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x0c \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x04\x88\x01\x01\x12\x14\n\x07timeout\x18\r 
\x01(\x04H\x05\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x0e \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x06\x88\x01\x01\x42\t\n\x07_offsetB\x08\n\x06_usingB\x0f\n\r_with_vectorsB\x0e\n\x0c_lookup_fromB\x13\n\x11_read_consistencyB\n\n\x08_timeoutB\x15\n\x13_shard_key_selector\"\xce\x01\n\x13\x44iscoverBatchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12/\n\x0f\x64iscover_points\x18\x02 \x03(\x0b\x32\x16.qdrant.DiscoverPoints\x12\x36\n\x10read_consistency\x18\x03 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x00\x88\x01\x01\x12\x14\n\x07timeout\x18\x04 \x01(\x04H\x01\x88\x01\x01\x42\x13\n\x11_read_consistencyB\n\n\x08_timeout\"\xa5\x02\n\x0b\x43ountPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x1e\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0e.qdrant.Filter\x12\x12\n\x05\x65xact\x18\x03 \x01(\x08H\x00\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x04 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x05 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x12\x14\n\x07timeout\x18\x06 \x01(\x04H\x03\x88\x01\x01\x42\x08\n\x06_exactB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selectorB\n\n\x08_timeout\"\x9d\x01\n\x0eRecommendInput\x12%\n\x08positive\x18\x01 \x03(\x0b\x32\x13.qdrant.VectorInput\x12%\n\x08negative\x18\x02 \x03(\x0b\x32\x13.qdrant.VectorInput\x12\x30\n\x08strategy\x18\x03 \x01(\x0e\x32\x19.qdrant.RecommendStrategyH\x00\x88\x01\x01\x42\x0b\n\t_strategy\"`\n\x10\x43ontextInputPair\x12%\n\x08positive\x18\x01 \x01(\x0b\x32\x13.qdrant.VectorInput\x12%\n\x08negative\x18\x02 \x01(\x0b\x32\x13.qdrant.VectorInput\"[\n\rDiscoverInput\x12#\n\x06target\x18\x01 \x01(\x0b\x32\x13.qdrant.VectorInput\x12%\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x14.qdrant.ContextInput\"7\n\x0c\x43ontextInput\x12\'\n\x05pairs\x18\x01 \x03(\x0b\x32\x18.qdrant.ContextInputPair\"\xa2\x01\n\x07\x46ormula\x12&\n\nexpression\x18\x01 \x01(\x0b\x32\x12.qdrant.Expression\x12/\n\x08\x64\x65\x66\x61ults\x18\x02 \x03(\x0b\x32\x1d.qdrant.Formula.DefaultsEntry\x1a>\n\rDefaultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\"\xcc\x05\n\nExpression\x12\x12\n\x08\x63onstant\x18\x01 \x01(\x02H\x00\x12\x12\n\x08variable\x18\x02 \x01(\tH\x00\x12&\n\tcondition\x18\x03 \x01(\x0b\x32\x11.qdrant.ConditionH\x00\x12+\n\x0cgeo_distance\x18\x04 \x01(\x0b\x32\x13.qdrant.GeoDistanceH\x00\x12\x12\n\x08\x64\x61tetime\x18\x05 \x01(\tH\x00\x12\x16\n\x0c\x64\x61tetime_key\x18\x06 \x01(\tH\x00\x12&\n\x04mult\x18\x07 \x01(\x0b\x32\x16.qdrant.MultExpressionH\x00\x12$\n\x03sum\x18\x08 \x01(\x0b\x32\x15.qdrant.SumExpressionH\x00\x12$\n\x03\x64iv\x18\t \x01(\x0b\x32\x15.qdrant.DivExpressionH\x00\x12!\n\x03neg\x18\n \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12!\n\x03\x61\x62s\x18\x0b \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12\"\n\x04sqrt\x18\x0c \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12$\n\x03pow\x18\r \x01(\x0b\x32\x15.qdrant.PowExpressionH\x00\x12!\n\x03\x65xp\x18\x0e \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12#\n\x05log10\x18\x0f \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12 \n\x02ln\x18\x10 \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12\x32\n\texp_decay\x18\x11 \x01(\x0b\x32\x1d.qdrant.DecayParamsExpressionH\x00\x12\x34\n\x0bgauss_decay\x18\x12 \x01(\x0b\x32\x1d.qdrant.DecayParamsExpressionH\x00\x12\x32\n\tlin_decay\x18\x13 \x01(\x0b\x32\x1d.qdrant.DecayParamsExpressionH\x00\x42\t\n\x07variant\";\n\x0bGeoDistance\x12 \n\x06origin\x18\x01 \x01(\x0b\x32\x10.qdrant.GeoPoint\x12\n\n\x02to\x18\x02 
\x01(\t\"2\n\x0eMultExpression\x12 \n\x04mult\x18\x01 \x03(\x0b\x32\x12.qdrant.Expression\"0\n\rSumExpression\x12\x1f\n\x03sum\x18\x01 \x03(\x0b\x32\x12.qdrant.Expression\"\x86\x01\n\rDivExpression\x12 \n\x04left\x18\x01 \x01(\x0b\x32\x12.qdrant.Expression\x12!\n\x05right\x18\x02 \x01(\x0b\x32\x12.qdrant.Expression\x12\x1c\n\x0f\x62y_zero_default\x18\x03 \x01(\x02H\x00\x88\x01\x01\x42\x12\n\x10_by_zero_default\"W\n\rPowExpression\x12 \n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x12.qdrant.Expression\x12$\n\x08\x65xponent\x18\x02 \x01(\x0b\x32\x12.qdrant.Expression\"\xac\x01\n\x15\x44\x65\x63\x61yParamsExpression\x12\x1d\n\x01x\x18\x01 \x01(\x0b\x32\x12.qdrant.Expression\x12\'\n\x06target\x18\x02 \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x88\x01\x01\x12\x12\n\x05scale\x18\x03 \x01(\x02H\x01\x88\x01\x01\x12\x15\n\x08midpoint\x18\x04 \x01(\x02H\x02\x88\x01\x01\x42\t\n\x07_targetB\x08\n\x06_scaleB\x0b\n\t_midpoint\"U\n\x13NearestInputWithMmr\x12$\n\x07nearest\x18\x01 \x01(\x0b\x32\x13.qdrant.VectorInput\x12\x18\n\x03mmr\x18\x02 \x01(\x0b\x32\x0b.qdrant.Mmr\"_\n\x03Mmr\x12\x16\n\tdiversity\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x1d\n\x10\x63\x61ndidates_limit\x18\x03 \x01(\rH\x01\x88\x01\x01\x42\x0c\n\n_diversityB\x13\n\x11_candidates_limit\"\x81\x03\n\x05Query\x12&\n\x07nearest\x18\x01 \x01(\x0b\x32\x13.qdrant.VectorInputH\x00\x12+\n\trecommend\x18\x02 \x01(\x0b\x32\x16.qdrant.RecommendInputH\x00\x12)\n\x08\x64iscover\x18\x03 \x01(\x0b\x32\x15.qdrant.DiscoverInputH\x00\x12\'\n\x07\x63ontext\x18\x04 \x01(\x0b\x32\x14.qdrant.ContextInputH\x00\x12#\n\x08order_by\x18\x05 \x01(\x0b\x32\x0f.qdrant.OrderByH\x00\x12 \n\x06\x66usion\x18\x06 \x01(\x0e\x32\x0e.qdrant.FusionH\x00\x12 \n\x06sample\x18\x07 \x01(\x0e\x32\x0e.qdrant.SampleH\x00\x12\"\n\x07\x66ormula\x18\x08 \x01(\x0b\x32\x0f.qdrant.FormulaH\x00\x12\x37\n\x10nearest_with_mmr\x18\t \x01(\x0b\x32\x1b.qdrant.NearestInputWithMmrH\x00\x42\t\n\x07variant\"\xfb\x02\n\rPrefetchQuery\x12\'\n\x08prefetch\x18\x01 \x03(\x0b\x32\x15.qdrant.PrefetchQuery\x12!\n\x05query\x18\x02 \x01(\x0b\x32\r.qdrant.QueryH\x00\x88\x01\x01\x12\x12\n\x05using\x18\x03 \x01(\tH\x01\x88\x01\x01\x12#\n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x0e.qdrant.FilterH\x02\x88\x01\x01\x12)\n\x06params\x18\x05 \x01(\x0b\x32\x14.qdrant.SearchParamsH\x03\x88\x01\x01\x12\x1c\n\x0fscore_threshold\x18\x06 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05limit\x18\x07 \x01(\x04H\x05\x88\x01\x01\x12\x30\n\x0blookup_from\x18\x08 \x01(\x0b\x32\x16.qdrant.LookupLocationH\x06\x88\x01\x01\x42\x08\n\x06_queryB\x08\n\x06_usingB\t\n\x07_filterB\t\n\x07_paramsB\x12\n\x10_score_thresholdB\x08\n\x06_limitB\x0e\n\x0c_lookup_from\"\x85\x06\n\x0bQueryPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\'\n\x08prefetch\x18\x02 \x03(\x0b\x32\x15.qdrant.PrefetchQuery\x12!\n\x05query\x18\x03 \x01(\x0b\x32\r.qdrant.QueryH\x00\x88\x01\x01\x12\x12\n\x05using\x18\x04 \x01(\tH\x01\x88\x01\x01\x12#\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x0e.qdrant.FilterH\x02\x88\x01\x01\x12)\n\x06params\x18\x06 \x01(\x0b\x32\x14.qdrant.SearchParamsH\x03\x88\x01\x01\x12\x1c\n\x0fscore_threshold\x18\x07 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05limit\x18\x08 \x01(\x04H\x05\x88\x01\x01\x12\x13\n\x06offset\x18\t \x01(\x04H\x06\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\n \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x07\x88\x01\x01\x12\x36\n\x0cwith_payload\x18\x0b \x01(\x0b\x32\x1b.qdrant.WithPayloadSelectorH\x08\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x0c \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\t\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\r 
\x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\n\x88\x01\x01\x12\x30\n\x0blookup_from\x18\x0e \x01(\x0b\x32\x16.qdrant.LookupLocationH\x0b\x88\x01\x01\x12\x14\n\x07timeout\x18\x0f \x01(\x04H\x0c\x88\x01\x01\x42\x08\n\x06_queryB\x08\n\x06_usingB\t\n\x07_filterB\t\n\x07_paramsB\x12\n\x10_score_thresholdB\x08\n\x06_limitB\t\n\x07_offsetB\x0f\n\r_with_vectorsB\x0f\n\r_with_payloadB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selectorB\x0e\n\x0c_lookup_fromB\n\n\x08_timeout\"\xc5\x01\n\x10QueryBatchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12)\n\x0cquery_points\x18\x02 \x03(\x0b\x32\x13.qdrant.QueryPoints\x12\x36\n\x10read_consistency\x18\x03 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x00\x88\x01\x01\x12\x14\n\x07timeout\x18\x04 \x01(\x04H\x01\x88\x01\x01\x42\x13\n\x11_read_consistencyB\n\n\x08_timeout\"\xcc\x06\n\x10QueryPointGroups\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\'\n\x08prefetch\x18\x02 \x03(\x0b\x32\x15.qdrant.PrefetchQuery\x12!\n\x05query\x18\x03 \x01(\x0b\x32\r.qdrant.QueryH\x00\x88\x01\x01\x12\x12\n\x05using\x18\x04 \x01(\tH\x01\x88\x01\x01\x12#\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x0e.qdrant.FilterH\x02\x88\x01\x01\x12)\n\x06params\x18\x06 \x01(\x0b\x32\x14.qdrant.SearchParamsH\x03\x88\x01\x01\x12\x1c\n\x0fscore_threshold\x18\x07 \x01(\x02H\x04\x88\x01\x01\x12\x31\n\x0cwith_payload\x18\x08 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12\x36\n\x0cwith_vectors\x18\t \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x05\x88\x01\x01\x12\x30\n\x0blookup_from\x18\n \x01(\x0b\x32\x16.qdrant.LookupLocationH\x06\x88\x01\x01\x12\x12\n\x05limit\x18\x0b \x01(\x04H\x07\x88\x01\x01\x12\x17\n\ngroup_size\x18\x0c \x01(\x04H\x08\x88\x01\x01\x12\x10\n\x08group_by\x18\r \x01(\t\x12\x36\n\x10read_consistency\x18\x0e \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\t\x88\x01\x01\x12,\n\x0bwith_lookup\x18\x0f \x01(\x0b\x32\x12.qdrant.WithLookupH\n\x88\x01\x01\x12\x14\n\x07timeout\x18\x10 \x01(\x04H\x0b\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x11 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x0c\x88\x01\x01\x42\x08\n\x06_queryB\x08\n\x06_usingB\t\n\x07_filterB\t\n\x07_paramsB\x12\n\x10_score_thresholdB\x0f\n\r_with_vectorsB\x0e\n\x0c_lookup_fromB\x08\n\x06_limitB\r\n\x0b_group_sizeB\x13\n\x11_read_consistencyB\x0e\n\x0c_with_lookupB\n\n\x08_timeoutB\x15\n\x13_shard_key_selector\"\xe0\x02\n\x0b\x46\x61\x63\x65tCounts\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12#\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x0e.qdrant.FilterH\x00\x88\x01\x01\x12\x12\n\x05limit\x18\x04 \x01(\x04H\x01\x88\x01\x01\x12\x12\n\x05\x65xact\x18\x05 \x01(\x08H\x02\x88\x01\x01\x12\x14\n\x07timeout\x18\x06 \x01(\x04H\x03\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x07 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x04\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x08 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x05\x88\x01\x01\x42\t\n\x07_filterB\x08\n\x06_limitB\x08\n\x06_exactB\n\n\x08_timeoutB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selector\"^\n\nFacetValue\x12\x16\n\x0cstring_value\x18\x01 \x01(\tH\x00\x12\x17\n\rinteger_value\x18\x02 \x01(\x03H\x00\x12\x14\n\nbool_value\x18\x03 \x01(\x08H\x00\x42\t\n\x07variant\"<\n\x08\x46\x61\x63\x65tHit\x12!\n\x05value\x18\x01 \x01(\x0b\x32\x12.qdrant.FacetValue\x12\r\n\x05\x63ount\x18\x02 \x01(\x04\"\xfa\x02\n\x12SearchMatrixPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12#\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0e.qdrant.FilterH\x00\x88\x01\x01\x12\x13\n\x06sample\x18\x03 
\x01(\x04H\x01\x88\x01\x01\x12\x12\n\x05limit\x18\x04 \x01(\x04H\x02\x88\x01\x01\x12\x12\n\x05using\x18\x05 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07timeout\x18\x06 \x01(\x04H\x04\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x07 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x05\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x08 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x06\x88\x01\x01\x42\t\n\x07_filterB\t\n\x07_sampleB\x08\n\x06_limitB\x08\n\x06_usingB\n\n\x08_timeoutB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selector\"<\n\x11SearchMatrixPairs\x12\'\n\x05pairs\x18\x01 \x03(\x0b\x32\x18.qdrant.SearchMatrixPair\"Y\n\x10SearchMatrixPair\x12\x1a\n\x01\x61\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointId\x12\x1a\n\x01\x62\x18\x02 \x01(\x0b\x32\x0f.qdrant.PointId\x12\r\n\x05score\x18\x03 \x01(\x02\"m\n\x13SearchMatrixOffsets\x12\x13\n\x0boffsets_row\x18\x01 \x03(\x04\x12\x13\n\x0boffsets_col\x18\x02 \x03(\x04\x12\x0e\n\x06scores\x18\x03 \x03(\x02\x12\x1c\n\x03ids\x18\x04 \x03(\x0b\x32\x0f.qdrant.PointId\"\x95\x12\n\x15PointsUpdateOperation\x12?\n\x06upsert\x18\x01 \x01(\x0b\x32-.qdrant.PointsUpdateOperation.PointStructListH\x00\x12\x37\n\x11\x64\x65lete_deprecated\x18\x02 \x01(\x0b\x32\x16.qdrant.PointsSelectorB\x02\x18\x01H\x00\x12?\n\x0bset_payload\x18\x03 \x01(\x0b\x32(.qdrant.PointsUpdateOperation.SetPayloadH\x00\x12K\n\x11overwrite_payload\x18\x04 \x01(\x0b\x32..qdrant.PointsUpdateOperation.OverwritePayloadH\x00\x12\x45\n\x0e\x64\x65lete_payload\x18\x05 \x01(\x0b\x32+.qdrant.PointsUpdateOperation.DeletePayloadH\x00\x12>\n\x18\x63lear_payload_deprecated\x18\x06 \x01(\x0b\x32\x16.qdrant.PointsSelectorB\x02\x18\x01H\x00\x12\x45\n\x0eupdate_vectors\x18\x07 \x01(\x0b\x32+.qdrant.PointsUpdateOperation.UpdateVectorsH\x00\x12\x45\n\x0e\x64\x65lete_vectors\x18\x08 \x01(\x0b\x32+.qdrant.PointsUpdateOperation.DeleteVectorsH\x00\x12\x43\n\rdelete_points\x18\t \x01(\x0b\x32*.qdrant.PointsUpdateOperation.DeletePointsH\x00\x12\x43\n\rclear_payload\x18\n \x01(\x0b\x32*.qdrant.PointsUpdateOperation.ClearPayloadH\x00\x1a\x88\x01\n\x0fPointStructList\x12#\n\x06points\x18\x01 \x03(\x0b\x32\x13.qdrant.PointStruct\x12\x39\n\x12shard_key_selector\x18\x02 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x00\x88\x01\x01\x42\x15\n\x13_shard_key_selector\x1a\xc9\x02\n\nSetPayload\x12\x46\n\x07payload\x18\x01 \x03(\x0b\x32\x35.qdrant.PointsUpdateOperation.SetPayload.PayloadEntry\x12\x34\n\x0fpoints_selector\x18\x02 \x01(\x0b\x32\x16.qdrant.PointsSelectorH\x00\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x03 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x01\x88\x01\x01\x12\x10\n\x03key\x18\x04 \x01(\tH\x02\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\x12\n\x10_points_selectorB\x15\n\x13_shard_key_selectorB\x06\n\x04_key\x1a\xd5\x02\n\x10OverwritePayload\x12L\n\x07payload\x18\x01 \x03(\x0b\x32;.qdrant.PointsUpdateOperation.OverwritePayload.PayloadEntry\x12\x34\n\x0fpoints_selector\x18\x02 \x01(\x0b\x32\x16.qdrant.PointsSelectorH\x00\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x03 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x01\x88\x01\x01\x12\x10\n\x03key\x18\x04 \x01(\tH\x02\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\x12\n\x10_points_selectorB\x15\n\x13_shard_key_selectorB\x06\n\x04_key\x1a\xb9\x01\n\rDeletePayload\x12\x0c\n\x04keys\x18\x01 \x03(\t\x12\x34\n\x0fpoints_selector\x18\x02 
\x01(\x0b\x32\x16.qdrant.PointsSelectorH\x00\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x03 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x01\x88\x01\x01\x42\x12\n\x10_points_selectorB\x15\n\x13_shard_key_selector\x1a\x87\x01\n\rUpdateVectors\x12$\n\x06points\x18\x01 \x03(\x0b\x32\x14.qdrant.PointVectors\x12\x39\n\x12shard_key_selector\x18\x02 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x00\x88\x01\x01\x42\x15\n\x13_shard_key_selector\x1a\xbc\x01\n\rDeleteVectors\x12/\n\x0fpoints_selector\x18\x01 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12(\n\x07vectors\x18\x02 \x01(\x0b\x32\x17.qdrant.VectorsSelector\x12\x39\n\x12shard_key_selector\x18\x03 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x00\x88\x01\x01\x42\x15\n\x13_shard_key_selector\x1a\x88\x01\n\x0c\x44\x65letePoints\x12&\n\x06points\x18\x01 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12\x39\n\x12shard_key_selector\x18\x02 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x00\x88\x01\x01\x42\x15\n\x13_shard_key_selector\x1a\x88\x01\n\x0c\x43learPayload\x12&\n\x06points\x18\x01 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12\x39\n\x12shard_key_selector\x18\x02 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x00\x88\x01\x01\x42\x15\n\x13_shard_key_selectorB\x0b\n\toperation\"\xb6\x01\n\x11UpdateBatchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x31\n\noperations\x18\x03 \x03(\x0b\x32\x1d.qdrant.PointsUpdateOperation\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_ordering\"z\n\x17PointsOperationResponse\x12$\n\x06result\x18\x01 \x01(\x0b\x32\x14.qdrant.UpdateResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"`\n\x0cUpdateResult\x12\x19\n\x0coperation_id\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12$\n\x06status\x18\x02 \x01(\x0e\x32\x14.qdrant.UpdateStatusB\x0f\n\r_operation_id\"7\n\nOrderValue\x12\r\n\x03int\x18\x01 \x01(\x03H\x00\x12\x0f\n\x05\x66loat\x18\x02 \x01(\x01H\x00\x42\t\n\x07variant\"\xf1\x02\n\x0bScoredPoint\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointId\x12\x31\n\x07payload\x18\x02 \x03(\x0b\x32 .qdrant.ScoredPoint.PayloadEntry\x12\r\n\x05score\x18\x03 \x01(\x02\x12\x0f\n\x07version\x18\x05 \x01(\x04\x12+\n\x07vectors\x18\x06 \x01(\x0b\x32\x15.qdrant.VectorsOutputH\x00\x88\x01\x01\x12(\n\tshard_key\x18\x07 \x01(\x0b\x32\x10.qdrant.ShardKeyH\x01\x88\x01\x01\x12,\n\x0border_value\x18\x08 \x01(\x0b\x32\x12.qdrant.OrderValueH\x02\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\n\n\x08_vectorsB\x0c\n\n_shard_keyB\x0e\n\x0c_order_valueJ\x04\x08\x04\x10\x05\"\\\n\x07GroupId\x12\x18\n\x0eunsigned_value\x18\x01 \x01(\x04H\x00\x12\x17\n\rinteger_value\x18\x02 \x01(\x03H\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x42\x06\n\x04kind\"t\n\nPointGroup\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.GroupId\x12!\n\x04hits\x18\x02 \x03(\x0b\x32\x13.qdrant.ScoredPoint\x12&\n\x06lookup\x18\x03 \x01(\x0b\x32\x16.qdrant.RetrievedPoint\"2\n\x0cGroupsResult\x12\"\n\x06groups\x18\x01 \x03(\x0b\x32\x12.qdrant.PointGroup\"p\n\x0eSearchResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.ScoredPoint\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"o\n\rQueryResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.ScoredPoint\x12\x0c\n\x04time\x18\x02 
\x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"t\n\x12QueryBatchResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.BatchResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"v\n\x13QueryGroupsResponse\x12$\n\x06result\x18\x01 \x01(\x0b\x32\x14.qdrant.GroupsResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"2\n\x0b\x42\x61tchResult\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.ScoredPoint\"u\n\x13SearchBatchResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.BatchResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"w\n\x14SearchGroupsResponse\x12$\n\x06result\x18\x01 \x01(\x0b\x32\x14.qdrant.GroupsResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"o\n\rCountResponse\x12#\n\x06result\x18\x01 \x01(\x0b\x32\x13.qdrant.CountResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"\xb8\x01\n\x0eScrollResponse\x12.\n\x10next_page_offset\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointIdH\x00\x88\x01\x01\x12&\n\x06result\x18\x02 \x03(\x0b\x32\x16.qdrant.RetrievedPoint\x12\x0c\n\x04time\x18\x03 \x01(\x01\x12!\n\x05usage\x18\x04 \x01(\x0b\x32\r.qdrant.UsageH\x01\x88\x01\x01\x42\x13\n\x11_next_page_offsetB\x08\n\x06_usage\"\x1c\n\x0b\x43ountResult\x12\r\n\x05\x63ount\x18\x01 \x01(\x04\"\xd7\x02\n\x0eRetrievedPoint\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointId\x12\x34\n\x07payload\x18\x02 \x03(\x0b\x32#.qdrant.RetrievedPoint.PayloadEntry\x12+\n\x07vectors\x18\x04 \x01(\x0b\x32\x15.qdrant.VectorsOutputH\x00\x88\x01\x01\x12(\n\tshard_key\x18\x05 \x01(\x0b\x32\x10.qdrant.ShardKeyH\x01\x88\x01\x01\x12,\n\x0border_value\x18\x06 \x01(\x0b\x32\x12.qdrant.OrderValueH\x02\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\n\n\x08_vectorsB\x0c\n\n_shard_keyB\x0e\n\x0c_order_valueJ\x04\x08\x03\x10\x04\"p\n\x0bGetResponse\x12&\n\x06result\x18\x01 \x03(\x0b\x32\x16.qdrant.RetrievedPoint\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"s\n\x11RecommendResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.ScoredPoint\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"x\n\x16RecommendBatchResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.BatchResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"r\n\x10\x44iscoverResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.ScoredPoint\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"w\n\x15\x44iscoverBatchResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.BatchResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"z\n\x17RecommendGroupsResponse\x12$\n\x06result\x18\x01 \x01(\x0b\x32\x14.qdrant.GroupsResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 
\x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"v\n\x13UpdateBatchResponse\x12$\n\x06result\x18\x01 \x03(\x0b\x32\x14.qdrant.UpdateResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"=\n\rFacetResponse\x12\x1e\n\x04hits\x18\x01 \x03(\x0b\x32\x10.qdrant.FacetHit\x12\x0c\n\x04time\x18\x02 \x01(\x01\"\x81\x01\n\x19SearchMatrixPairsResponse\x12)\n\x06result\x18\x01 \x01(\x0b\x32\x19.qdrant.SearchMatrixPairs\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"\x85\x01\n\x1bSearchMatrixOffsetsResponse\x12+\n\x06result\x18\x01 \x01(\x0b\x32\x1b.qdrant.SearchMatrixOffsets\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"\xac\x01\n\x06\x46ilter\x12!\n\x06should\x18\x01 \x03(\x0b\x32\x11.qdrant.Condition\x12\x1f\n\x04must\x18\x02 \x03(\x0b\x32\x11.qdrant.Condition\x12#\n\x08must_not\x18\x03 \x03(\x0b\x32\x11.qdrant.Condition\x12*\n\nmin_should\x18\x04 \x01(\x0b\x32\x11.qdrant.MinShouldH\x00\x88\x01\x01\x42\r\n\x0b_min_should\"E\n\tMinShould\x12%\n\nconditions\x18\x01 \x03(\x0b\x32\x11.qdrant.Condition\x12\x11\n\tmin_count\x18\x02 \x01(\x04\"\xcb\x02\n\tCondition\x12\'\n\x05\x66ield\x18\x01 \x01(\x0b\x32\x16.qdrant.FieldConditionH\x00\x12,\n\x08is_empty\x18\x02 \x01(\x0b\x32\x18.qdrant.IsEmptyConditionH\x00\x12(\n\x06has_id\x18\x03 \x01(\x0b\x32\x16.qdrant.HasIdConditionH\x00\x12 \n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x0e.qdrant.FilterH\x00\x12*\n\x07is_null\x18\x05 \x01(\x0b\x32\x17.qdrant.IsNullConditionH\x00\x12)\n\x06nested\x18\x06 \x01(\x0b\x32\x17.qdrant.NestedConditionH\x00\x12\x30\n\nhas_vector\x18\x07 \x01(\x0b\x32\x1a.qdrant.HasVectorConditionH\x00\x42\x12\n\x10\x63ondition_one_of\"\x1f\n\x10IsEmptyCondition\x12\x0b\n\x03key\x18\x01 \x01(\t\"\x1e\n\x0fIsNullCondition\x12\x0b\n\x03key\x18\x01 \x01(\t\"1\n\x0eHasIdCondition\x12\x1f\n\x06has_id\x18\x01 \x03(\x0b\x32\x0f.qdrant.PointId\"(\n\x12HasVectorCondition\x12\x12\n\nhas_vector\x18\x01 \x01(\t\">\n\x0fNestedCondition\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1e\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0e.qdrant.Filter\"\xfb\x02\n\x0e\x46ieldCondition\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05match\x18\x02 \x01(\x0b\x32\r.qdrant.Match\x12\x1c\n\x05range\x18\x03 \x01(\x0b\x32\r.qdrant.Range\x12\x30\n\x10geo_bounding_box\x18\x04 \x01(\x0b\x32\x16.qdrant.GeoBoundingBox\x12%\n\ngeo_radius\x18\x05 \x01(\x0b\x32\x11.qdrant.GeoRadius\x12)\n\x0cvalues_count\x18\x06 \x01(\x0b\x32\x13.qdrant.ValuesCount\x12\'\n\x0bgeo_polygon\x18\x07 \x01(\x0b\x32\x12.qdrant.GeoPolygon\x12-\n\x0e\x64\x61tetime_range\x18\x08 \x01(\x0b\x32\x15.qdrant.DatetimeRange\x12\x15\n\x08is_empty\x18\t \x01(\x08H\x00\x88\x01\x01\x12\x14\n\x07is_null\x18\n \x01(\x08H\x01\x88\x01\x01\x42\x0b\n\t_is_emptyB\n\n\x08_is_null\"\xb5\x02\n\x05Match\x12\x11\n\x07keyword\x18\x01 \x01(\tH\x00\x12\x11\n\x07integer\x18\x02 \x01(\x03H\x00\x12\x11\n\x07\x62oolean\x18\x03 \x01(\x08H\x00\x12\x0e\n\x04text\x18\x04 \x01(\tH\x00\x12+\n\x08keywords\x18\x05 \x01(\x0b\x32\x17.qdrant.RepeatedStringsH\x00\x12,\n\x08integers\x18\x06 \x01(\x0b\x32\x18.qdrant.RepeatedIntegersH\x00\x12\x33\n\x0f\x65xcept_integers\x18\x07 \x01(\x0b\x32\x18.qdrant.RepeatedIntegersH\x00\x12\x32\n\x0f\x65xcept_keywords\x18\x08 \x01(\x0b\x32\x17.qdrant.RepeatedStringsH\x00\x12\x10\n\x06phrase\x18\t 
\x01(\tH\x00\x42\r\n\x0bmatch_value\"\"\n\x0fRepeatedStrings\x12\x0f\n\x07strings\x18\x01 \x03(\t\"$\n\x10RepeatedIntegers\x12\x10\n\x08integers\x18\x01 \x03(\x03\"k\n\x05Range\x12\x0f\n\x02lt\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x0f\n\x02gt\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x10\n\x03gte\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x10\n\x03lte\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x05\n\x03_ltB\x05\n\x03_gtB\x06\n\x04_gteB\x06\n\x04_lte\"\xe3\x01\n\rDatetimeRange\x12+\n\x02lt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x12+\n\x02gt\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x01\x88\x01\x01\x12,\n\x03gte\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x02\x88\x01\x01\x12,\n\x03lte\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x03\x88\x01\x01\x42\x05\n\x03_ltB\x05\n\x03_gtB\x06\n\x04_gteB\x06\n\x04_lte\"\\\n\x0eGeoBoundingBox\x12\"\n\x08top_left\x18\x01 \x01(\x0b\x32\x10.qdrant.GeoPoint\x12&\n\x0c\x62ottom_right\x18\x02 \x01(\x0b\x32\x10.qdrant.GeoPoint\"=\n\tGeoRadius\x12 \n\x06\x63\x65nter\x18\x01 \x01(\x0b\x32\x10.qdrant.GeoPoint\x12\x0e\n\x06radius\x18\x02 \x01(\x02\"1\n\rGeoLineString\x12 \n\x06points\x18\x01 \x03(\x0b\x32\x10.qdrant.GeoPoint\"_\n\nGeoPolygon\x12\'\n\x08\x65xterior\x18\x01 \x01(\x0b\x32\x15.qdrant.GeoLineString\x12(\n\tinteriors\x18\x02 \x03(\x0b\x32\x15.qdrant.GeoLineString\"q\n\x0bValuesCount\x12\x0f\n\x02lt\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x0f\n\x02gt\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\x10\n\x03gte\x18\x03 \x01(\x04H\x02\x88\x01\x01\x12\x10\n\x03lte\x18\x04 \x01(\x04H\x03\x88\x01\x01\x42\x05\n\x03_ltB\x05\n\x03_gtB\x06\n\x04_gteB\x06\n\x04_lte\"u\n\x0ePointsSelector\x12\'\n\x06points\x18\x01 \x01(\x0b\x32\x15.qdrant.PointsIdsListH\x00\x12 \n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0e.qdrant.FilterH\x00\x42\x18\n\x16points_selector_one_of\"-\n\rPointsIdsList\x12\x1c\n\x03ids\x18\x01 \x03(\x0b\x32\x0f.qdrant.PointId\"\xd5\x01\n\x0bPointStruct\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointId\x12\x31\n\x07payload\x18\x03 \x03(\x0b\x32 .qdrant.PointStruct.PayloadEntry\x12%\n\x07vectors\x18\x04 \x01(\x0b\x32\x0f.qdrant.VectorsH\x00\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\n\n\x08_vectorsJ\x04\x08\x02\x10\x03\"$\n\x08GeoPoint\x12\x0b\n\x03lon\x18\x01 \x01(\x01\x12\x0b\n\x03lat\x18\x02 \x01(\x01\"\x80\x01\n\x05Usage\x12,\n\x08hardware\x18\x01 \x01(\x0b\x32\x15.qdrant.HardwareUsageH\x00\x88\x01\x01\x12.\n\tinference\x18\x02 \x01(\x0b\x32\x16.qdrant.InferenceUsageH\x01\x88\x01\x01\x42\x0b\n\t_hardwareB\x0c\n\n_inference\"\x87\x01\n\x0eInferenceUsage\x12\x32\n\x06models\x18\x01 \x03(\x0b\x32\".qdrant.InferenceUsage.ModelsEntry\x1a\x41\n\x0bModelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.qdrant.ModelUsage:\x02\x38\x01\"\x1c\n\nModelUsage\x12\x0e\n\x06tokens\x18\x01 \x01(\x04\"\xbf\x01\n\rHardwareUsage\x12\x0b\n\x03\x63pu\x18\x01 \x01(\x04\x12\x17\n\x0fpayload_io_read\x18\x02 \x01(\x04\x12\x18\n\x10payload_io_write\x18\x03 \x01(\x04\x12\x1d\n\x15payload_index_io_read\x18\x04 \x01(\x04\x12\x1e\n\x16payload_index_io_write\x18\x05 \x01(\x04\x12\x16\n\x0evector_io_read\x18\x06 \x01(\x04\x12\x17\n\x0fvector_io_write\x18\x07 
\x01(\x04*5\n\x11WriteOrderingType\x12\x08\n\x04Weak\x10\x00\x12\n\n\x06Medium\x10\x01\x12\n\n\x06Strong\x10\x02*8\n\x13ReadConsistencyType\x12\x07\n\x03\x41ll\x10\x00\x12\x0c\n\x08Majority\x10\x01\x12\n\n\x06Quorum\x10\x02*\xad\x01\n\tFieldType\x12\x14\n\x10\x46ieldTypeKeyword\x10\x00\x12\x14\n\x10\x46ieldTypeInteger\x10\x01\x12\x12\n\x0e\x46ieldTypeFloat\x10\x02\x12\x10\n\x0c\x46ieldTypeGeo\x10\x03\x12\x11\n\rFieldTypeText\x10\x04\x12\x11\n\rFieldTypeBool\x10\x05\x12\x15\n\x11\x46ieldTypeDatetime\x10\x06\x12\x11\n\rFieldTypeUuid\x10\x07*\x1e\n\tDirection\x12\x07\n\x03\x41sc\x10\x00\x12\x08\n\x04\x44\x65sc\x10\x01*D\n\x11RecommendStrategy\x12\x11\n\rAverageVector\x10\x00\x12\r\n\tBestScore\x10\x01\x12\r\n\tSumScores\x10\x02*\x1b\n\x06\x46usion\x12\x07\n\x03RRF\x10\x00\x12\x08\n\x04\x44\x42SF\x10\x01*\x14\n\x06Sample\x12\n\n\x06Random\x10\x00*[\n\x0cUpdateStatus\x12\x17\n\x13UnknownUpdateStatus\x10\x00\x12\x10\n\x0c\x41\x63knowledged\x10\x01\x12\r\n\tCompleted\x10\x02\x12\x11\n\rClockRejected\x10\x03\x42\x15\xaa\x02\x12Qdrant.Client.Grpcb\x06proto3')
-
-_WRITEORDERINGTYPE = DESCRIPTOR.enum_types_by_name['WriteOrderingType']
-WriteOrderingType = enum_type_wrapper.EnumTypeWrapper(_WRITEORDERINGTYPE)
-_READCONSISTENCYTYPE = DESCRIPTOR.enum_types_by_name['ReadConsistencyType']
-ReadConsistencyType = enum_type_wrapper.EnumTypeWrapper(_READCONSISTENCYTYPE)
-_FIELDTYPE = DESCRIPTOR.enum_types_by_name['FieldType']
-FieldType = enum_type_wrapper.EnumTypeWrapper(_FIELDTYPE)
-_DIRECTION = DESCRIPTOR.enum_types_by_name['Direction']
-Direction = enum_type_wrapper.EnumTypeWrapper(_DIRECTION)
-_RECOMMENDSTRATEGY = DESCRIPTOR.enum_types_by_name['RecommendStrategy']
-RecommendStrategy = enum_type_wrapper.EnumTypeWrapper(_RECOMMENDSTRATEGY)
-_FUSION = DESCRIPTOR.enum_types_by_name['Fusion']
-Fusion = enum_type_wrapper.EnumTypeWrapper(_FUSION)
-_SAMPLE = DESCRIPTOR.enum_types_by_name['Sample']
-Sample = enum_type_wrapper.EnumTypeWrapper(_SAMPLE)
-_UPDATESTATUS = DESCRIPTOR.enum_types_by_name['UpdateStatus']
-UpdateStatus = enum_type_wrapper.EnumTypeWrapper(_UPDATESTATUS)
-Weak = 0
-Medium = 1
-Strong = 2
-All = 0
-Majority = 1
-Quorum = 2
-FieldTypeKeyword = 0
-FieldTypeInteger = 1
-FieldTypeFloat = 2
-FieldTypeGeo = 3
-FieldTypeText = 4
-FieldTypeBool = 5
-FieldTypeDatetime = 6
-FieldTypeUuid = 7
-Asc = 0
-Desc = 1
-AverageVector = 0
-BestScore = 1
-SumScores = 2
-RRF = 0
-DBSF = 1
-Random = 0
-UnknownUpdateStatus = 0
-Acknowledged = 1
-Completed = 2
-ClockRejected = 3
-
-
-_WRITEORDERING = DESCRIPTOR.message_types_by_name['WriteOrdering']
-_READCONSISTENCY = DESCRIPTOR.message_types_by_name['ReadConsistency']
-_POINTID = DESCRIPTOR.message_types_by_name['PointId']
-_SPARSEINDICES = DESCRIPTOR.message_types_by_name['SparseIndices']
-_DOCUMENT = DESCRIPTOR.message_types_by_name['Document']
-_DOCUMENT_OPTIONSENTRY = _DOCUMENT.nested_types_by_name['OptionsEntry']
-_IMAGE = DESCRIPTOR.message_types_by_name['Image']
-_IMAGE_OPTIONSENTRY = _IMAGE.nested_types_by_name['OptionsEntry']
-_INFERENCEOBJECT = DESCRIPTOR.message_types_by_name['InferenceObject']
-_INFERENCEOBJECT_OPTIONSENTRY = _INFERENCEOBJECT.nested_types_by_name['OptionsEntry']
-_VECTOR = DESCRIPTOR.message_types_by_name['Vector']
-_VECTOROUTPUT = DESCRIPTOR.message_types_by_name['VectorOutput']
-_DENSEVECTOR = DESCRIPTOR.message_types_by_name['DenseVector']
-_SPARSEVECTOR = DESCRIPTOR.message_types_by_name['SparseVector']
-_MULTIDENSEVECTOR = DESCRIPTOR.message_types_by_name['MultiDenseVector']
-_VECTORINPUT = DESCRIPTOR.message_types_by_name['VectorInput']
-_SHARDKEYSELECTOR = DESCRIPTOR.message_types_by_name['ShardKeySelector']
-_UPSERTPOINTS = DESCRIPTOR.message_types_by_name['UpsertPoints']
-_DELETEPOINTS = DESCRIPTOR.message_types_by_name['DeletePoints']
-_GETPOINTS = DESCRIPTOR.message_types_by_name['GetPoints']
-_UPDATEPOINTVECTORS = DESCRIPTOR.message_types_by_name['UpdatePointVectors']
-_POINTVECTORS = DESCRIPTOR.message_types_by_name['PointVectors']
-_DELETEPOINTVECTORS = DESCRIPTOR.message_types_by_name['DeletePointVectors']
-_SETPAYLOADPOINTS = DESCRIPTOR.message_types_by_name['SetPayloadPoints']
-_SETPAYLOADPOINTS_PAYLOADENTRY = _SETPAYLOADPOINTS.nested_types_by_name['PayloadEntry']
-_DELETEPAYLOADPOINTS = DESCRIPTOR.message_types_by_name['DeletePayloadPoints']
-_CLEARPAYLOADPOINTS = DESCRIPTOR.message_types_by_name['ClearPayloadPoints']
-_CREATEFIELDINDEXCOLLECTION = DESCRIPTOR.message_types_by_name['CreateFieldIndexCollection']
-_DELETEFIELDINDEXCOLLECTION = DESCRIPTOR.message_types_by_name['DeleteFieldIndexCollection']
-_PAYLOADINCLUDESELECTOR = DESCRIPTOR.message_types_by_name['PayloadIncludeSelector']
-_PAYLOADEXCLUDESELECTOR = DESCRIPTOR.message_types_by_name['PayloadExcludeSelector']
-_WITHPAYLOADSELECTOR = DESCRIPTOR.message_types_by_name['WithPayloadSelector']
-_NAMEDVECTORS = DESCRIPTOR.message_types_by_name['NamedVectors']
-_NAMEDVECTORS_VECTORSENTRY = _NAMEDVECTORS.nested_types_by_name['VectorsEntry']
-_NAMEDVECTORSOUTPUT = DESCRIPTOR.message_types_by_name['NamedVectorsOutput']
-_NAMEDVECTORSOUTPUT_VECTORSENTRY = _NAMEDVECTORSOUTPUT.nested_types_by_name['VectorsEntry']
-_VECTORS = DESCRIPTOR.message_types_by_name['Vectors']
-_VECTORSOUTPUT = DESCRIPTOR.message_types_by_name['VectorsOutput']
-_VECTORSSELECTOR = DESCRIPTOR.message_types_by_name['VectorsSelector']
-_WITHVECTORSSELECTOR = DESCRIPTOR.message_types_by_name['WithVectorsSelector']
-_QUANTIZATIONSEARCHPARAMS = DESCRIPTOR.message_types_by_name['QuantizationSearchParams']
-_SEARCHPARAMS = DESCRIPTOR.message_types_by_name['SearchParams']
-_SEARCHPOINTS = DESCRIPTOR.message_types_by_name['SearchPoints']
-_SEARCHBATCHPOINTS = DESCRIPTOR.message_types_by_name['SearchBatchPoints']
-_WITHLOOKUP = DESCRIPTOR.message_types_by_name['WithLookup']
-_SEARCHPOINTGROUPS = DESCRIPTOR.message_types_by_name['SearchPointGroups']
-_STARTFROM = DESCRIPTOR.message_types_by_name['StartFrom']
-_ORDERBY = DESCRIPTOR.message_types_by_name['OrderBy']
-_SCROLLPOINTS = DESCRIPTOR.message_types_by_name['ScrollPoints']
-_LOOKUPLOCATION = DESCRIPTOR.message_types_by_name['LookupLocation']
-_RECOMMENDPOINTS = DESCRIPTOR.message_types_by_name['RecommendPoints']
-_RECOMMENDBATCHPOINTS = DESCRIPTOR.message_types_by_name['RecommendBatchPoints']
-_RECOMMENDPOINTGROUPS = DESCRIPTOR.message_types_by_name['RecommendPointGroups']
-_TARGETVECTOR = DESCRIPTOR.message_types_by_name['TargetVector']
-_VECTOREXAMPLE = DESCRIPTOR.message_types_by_name['VectorExample']
-_CONTEXTEXAMPLEPAIR = DESCRIPTOR.message_types_by_name['ContextExamplePair']
-_DISCOVERPOINTS = DESCRIPTOR.message_types_by_name['DiscoverPoints']
-_DISCOVERBATCHPOINTS = DESCRIPTOR.message_types_by_name['DiscoverBatchPoints']
-_COUNTPOINTS = DESCRIPTOR.message_types_by_name['CountPoints']
-_RECOMMENDINPUT = DESCRIPTOR.message_types_by_name['RecommendInput']
-_CONTEXTINPUTPAIR = DESCRIPTOR.message_types_by_name['ContextInputPair']
-_DISCOVERINPUT = DESCRIPTOR.message_types_by_name['DiscoverInput']
-_CONTEXTINPUT = DESCRIPTOR.message_types_by_name['ContextInput']
-_FORMULA = DESCRIPTOR.message_types_by_name['Formula']
-_FORMULA_DEFAULTSENTRY = _FORMULA.nested_types_by_name['DefaultsEntry']
-_EXPRESSION = DESCRIPTOR.message_types_by_name['Expression']
-_GEODISTANCE = DESCRIPTOR.message_types_by_name['GeoDistance']
-_MULTEXPRESSION = DESCRIPTOR.message_types_by_name['MultExpression']
-_SUMEXPRESSION = DESCRIPTOR.message_types_by_name['SumExpression']
-_DIVEXPRESSION = DESCRIPTOR.message_types_by_name['DivExpression']
-_POWEXPRESSION = DESCRIPTOR.message_types_by_name['PowExpression']
-_DECAYPARAMSEXPRESSION = DESCRIPTOR.message_types_by_name['DecayParamsExpression']
-_NEARESTINPUTWITHMMR = DESCRIPTOR.message_types_by_name['NearestInputWithMmr']
-_MMR = DESCRIPTOR.message_types_by_name['Mmr']
-_QUERY = DESCRIPTOR.message_types_by_name['Query']
-_PREFETCHQUERY = DESCRIPTOR.message_types_by_name['PrefetchQuery']
-_QUERYPOINTS = DESCRIPTOR.message_types_by_name['QueryPoints']
-_QUERYBATCHPOINTS = DESCRIPTOR.message_types_by_name['QueryBatchPoints']
-_QUERYPOINTGROUPS = DESCRIPTOR.message_types_by_name['QueryPointGroups']
-_FACETCOUNTS = DESCRIPTOR.message_types_by_name['FacetCounts']
-_FACETVALUE = DESCRIPTOR.message_types_by_name['FacetValue']
-_FACETHIT = DESCRIPTOR.message_types_by_name['FacetHit']
-_SEARCHMATRIXPOINTS = DESCRIPTOR.message_types_by_name['SearchMatrixPoints']
-_SEARCHMATRIXPAIRS = DESCRIPTOR.message_types_by_name['SearchMatrixPairs']
-_SEARCHMATRIXPAIR = DESCRIPTOR.message_types_by_name['SearchMatrixPair']
-_SEARCHMATRIXOFFSETS = DESCRIPTOR.message_types_by_name['SearchMatrixOffsets']
-_POINTSUPDATEOPERATION = DESCRIPTOR.message_types_by_name['PointsUpdateOperation']
-_POINTSUPDATEOPERATION_POINTSTRUCTLIST = _POINTSUPDATEOPERATION.nested_types_by_name['PointStructList']
-_POINTSUPDATEOPERATION_SETPAYLOAD = _POINTSUPDATEOPERATION.nested_types_by_name['SetPayload']
-_POINTSUPDATEOPERATION_SETPAYLOAD_PAYLOADENTRY = _POINTSUPDATEOPERATION_SETPAYLOAD.nested_types_by_name['PayloadEntry']
-_POINTSUPDATEOPERATION_OVERWRITEPAYLOAD = _POINTSUPDATEOPERATION.nested_types_by_name['OverwritePayload']
-_POINTSUPDATEOPERATION_OVERWRITEPAYLOAD_PAYLOADENTRY = _POINTSUPDATEOPERATION_OVERWRITEPAYLOAD.nested_types_by_name['PayloadEntry']
-_POINTSUPDATEOPERATION_DELETEPAYLOAD = _POINTSUPDATEOPERATION.nested_types_by_name['DeletePayload']
-_POINTSUPDATEOPERATION_UPDATEVECTORS = _POINTSUPDATEOPERATION.nested_types_by_name['UpdateVectors']
-_POINTSUPDATEOPERATION_DELETEVECTORS = _POINTSUPDATEOPERATION.nested_types_by_name['DeleteVectors']
-_POINTSUPDATEOPERATION_DELETEPOINTS = _POINTSUPDATEOPERATION.nested_types_by_name['DeletePoints']
-_POINTSUPDATEOPERATION_CLEARPAYLOAD = _POINTSUPDATEOPERATION.nested_types_by_name['ClearPayload']
-_UPDATEBATCHPOINTS = DESCRIPTOR.message_types_by_name['UpdateBatchPoints']
-_POINTSOPERATIONRESPONSE = DESCRIPTOR.message_types_by_name['PointsOperationResponse']
-_UPDATERESULT = DESCRIPTOR.message_types_by_name['UpdateResult']
-_ORDERVALUE = DESCRIPTOR.message_types_by_name['OrderValue']
-_SCOREDPOINT = DESCRIPTOR.message_types_by_name['ScoredPoint']
-_SCOREDPOINT_PAYLOADENTRY = _SCOREDPOINT.nested_types_by_name['PayloadEntry']
-_GROUPID = DESCRIPTOR.message_types_by_name['GroupId']
-_POINTGROUP = DESCRIPTOR.message_types_by_name['PointGroup']
-_GROUPSRESULT = DESCRIPTOR.message_types_by_name['GroupsResult']
-_SEARCHRESPONSE = DESCRIPTOR.message_types_by_name['SearchResponse']
-_QUERYRESPONSE = DESCRIPTOR.message_types_by_name['QueryResponse']
-_QUERYBATCHRESPONSE = DESCRIPTOR.message_types_by_name['QueryBatchResponse']
-_QUERYGROUPSRESPONSE = DESCRIPTOR.message_types_by_name['QueryGroupsResponse']
-_BATCHRESULT = DESCRIPTOR.message_types_by_name['BatchResult']
-_SEARCHBATCHRESPONSE = DESCRIPTOR.message_types_by_name['SearchBatchResponse']
-_SEARCHGROUPSRESPONSE = DESCRIPTOR.message_types_by_name['SearchGroupsResponse']
-_COUNTRESPONSE = DESCRIPTOR.message_types_by_name['CountResponse']
-_SCROLLRESPONSE = DESCRIPTOR.message_types_by_name['ScrollResponse']
-_COUNTRESULT = DESCRIPTOR.message_types_by_name['CountResult']
-_RETRIEVEDPOINT = DESCRIPTOR.message_types_by_name['RetrievedPoint']
-_RETRIEVEDPOINT_PAYLOADENTRY = _RETRIEVEDPOINT.nested_types_by_name['PayloadEntry']
-_GETRESPONSE = DESCRIPTOR.message_types_by_name['GetResponse']
-_RECOMMENDRESPONSE = DESCRIPTOR.message_types_by_name['RecommendResponse']
-_RECOMMENDBATCHRESPONSE = DESCRIPTOR.message_types_by_name['RecommendBatchResponse']
-_DISCOVERRESPONSE = DESCRIPTOR.message_types_by_name['DiscoverResponse']
-_DISCOVERBATCHRESPONSE = DESCRIPTOR.message_types_by_name['DiscoverBatchResponse']
-_RECOMMENDGROUPSRESPONSE = DESCRIPTOR.message_types_by_name['RecommendGroupsResponse']
-_UPDATEBATCHRESPONSE = DESCRIPTOR.message_types_by_name['UpdateBatchResponse']
-_FACETRESPONSE = DESCRIPTOR.message_types_by_name['FacetResponse']
-_SEARCHMATRIXPAIRSRESPONSE = DESCRIPTOR.message_types_by_name['SearchMatrixPairsResponse']
-_SEARCHMATRIXOFFSETSRESPONSE = DESCRIPTOR.message_types_by_name['SearchMatrixOffsetsResponse']
-_FILTER = DESCRIPTOR.message_types_by_name['Filter']
-_MINSHOULD = DESCRIPTOR.message_types_by_name['MinShould']
-_CONDITION = DESCRIPTOR.message_types_by_name['Condition']
-_ISEMPTYCONDITION = DESCRIPTOR.message_types_by_name['IsEmptyCondition']
-_ISNULLCONDITION = DESCRIPTOR.message_types_by_name['IsNullCondition']
-_HASIDCONDITION = DESCRIPTOR.message_types_by_name['HasIdCondition']
-_HASVECTORCONDITION = DESCRIPTOR.message_types_by_name['HasVectorCondition']
-_NESTEDCONDITION = DESCRIPTOR.message_types_by_name['NestedCondition']
-_FIELDCONDITION = DESCRIPTOR.message_types_by_name['FieldCondition']
-_MATCH = DESCRIPTOR.message_types_by_name['Match']
-_REPEATEDSTRINGS = DESCRIPTOR.message_types_by_name['RepeatedStrings']
-_REPEATEDINTEGERS = DESCRIPTOR.message_types_by_name['RepeatedIntegers']
-_RANGE = DESCRIPTOR.message_types_by_name['Range']
-_DATETIMERANGE = DESCRIPTOR.message_types_by_name['DatetimeRange']
-_GEOBOUNDINGBOX = DESCRIPTOR.message_types_by_name['GeoBoundingBox']
-_GEORADIUS = DESCRIPTOR.message_types_by_name['GeoRadius']
-_GEOLINESTRING = DESCRIPTOR.message_types_by_name['GeoLineString']
-_GEOPOLYGON = DESCRIPTOR.message_types_by_name['GeoPolygon']
-_VALUESCOUNT = DESCRIPTOR.message_types_by_name['ValuesCount']
-_POINTSSELECTOR = DESCRIPTOR.message_types_by_name['PointsSelector']
-_POINTSIDSLIST = DESCRIPTOR.message_types_by_name['PointsIdsList']
-_POINTSTRUCT = DESCRIPTOR.message_types_by_name['PointStruct']
-_POINTSTRUCT_PAYLOADENTRY = _POINTSTRUCT.nested_types_by_name['PayloadEntry']
-_GEOPOINT = DESCRIPTOR.message_types_by_name['GeoPoint']
-_USAGE = DESCRIPTOR.message_types_by_name['Usage']
-_INFERENCEUSAGE = DESCRIPTOR.message_types_by_name['InferenceUsage']
-_INFERENCEUSAGE_MODELSENTRY = _INFERENCEUSAGE.nested_types_by_name['ModelsEntry']
-_MODELUSAGE = DESCRIPTOR.message_types_by_name['ModelUsage']
-_HARDWAREUSAGE = DESCRIPTOR.message_types_by_name['HardwareUsage']
-WriteOrdering = _reflection.GeneratedProtocolMessageType('WriteOrdering', (_message.Message,), {
-  'DESCRIPTOR' : _WRITEORDERING,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.WriteOrdering)
-  })
-_sym_db.RegisterMessage(WriteOrdering)
-
-ReadConsistency = _reflection.GeneratedProtocolMessageType('ReadConsistency', (_message.Message,), {
-  'DESCRIPTOR' : _READCONSISTENCY,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ReadConsistency)
-  })
-_sym_db.RegisterMessage(ReadConsistency)
-
-PointId = _reflection.GeneratedProtocolMessageType('PointId', (_message.Message,), {
-  'DESCRIPTOR' : _POINTID,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PointId)
-  })
-_sym_db.RegisterMessage(PointId)
-
-SparseIndices = _reflection.GeneratedProtocolMessageType('SparseIndices', (_message.Message,), {
-  'DESCRIPTOR' : _SPARSEINDICES,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SparseIndices)
-  })
-_sym_db.RegisterMessage(SparseIndices)
-
-Document = _reflection.GeneratedProtocolMessageType('Document', (_message.Message,), {
-
-  'OptionsEntry' : _reflection.GeneratedProtocolMessageType('OptionsEntry', (_message.Message,), {
-    'DESCRIPTOR' : _DOCUMENT_OPTIONSENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.Document.OptionsEntry)
-    })
-  ,
-  'DESCRIPTOR' : _DOCUMENT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Document)
-  })
-_sym_db.RegisterMessage(Document)
-_sym_db.RegisterMessage(Document.OptionsEntry)
-
-Image = _reflection.GeneratedProtocolMessageType('Image', (_message.Message,), {
-
-  'OptionsEntry' : _reflection.GeneratedProtocolMessageType('OptionsEntry', (_message.Message,), {
-    'DESCRIPTOR' : _IMAGE_OPTIONSENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.Image.OptionsEntry)
-    })
-  ,
-  'DESCRIPTOR' : _IMAGE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Image)
-  })
-_sym_db.RegisterMessage(Image)
-_sym_db.RegisterMessage(Image.OptionsEntry)
-
-InferenceObject = _reflection.GeneratedProtocolMessageType('InferenceObject', (_message.Message,), {
-
-  'OptionsEntry' : _reflection.GeneratedProtocolMessageType('OptionsEntry', (_message.Message,), {
-    'DESCRIPTOR' : _INFERENCEOBJECT_OPTIONSENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.InferenceObject.OptionsEntry)
-    })
-  ,
-  'DESCRIPTOR' : _INFERENCEOBJECT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.InferenceObject)
-  })
-_sym_db.RegisterMessage(InferenceObject)
-_sym_db.RegisterMessage(InferenceObject.OptionsEntry)
-
-Vector = _reflection.GeneratedProtocolMessageType('Vector', (_message.Message,), {
-  'DESCRIPTOR' : _VECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Vector)
-  })
-_sym_db.RegisterMessage(Vector)
-
-VectorOutput = _reflection.GeneratedProtocolMessageType('VectorOutput', (_message.Message,), {
-  'DESCRIPTOR' : _VECTOROUTPUT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.VectorOutput)
-  })
-_sym_db.RegisterMessage(VectorOutput)
-
-DenseVector = _reflection.GeneratedProtocolMessageType('DenseVector', (_message.Message,), {
-  'DESCRIPTOR' : _DENSEVECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DenseVector)
-  })
-_sym_db.RegisterMessage(DenseVector)
-
-SparseVector = _reflection.GeneratedProtocolMessageType('SparseVector', (_message.Message,), {
-  'DESCRIPTOR' : _SPARSEVECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SparseVector)
-  })
-_sym_db.RegisterMessage(SparseVector)
-
-MultiDenseVector = _reflection.GeneratedProtocolMessageType('MultiDenseVector', (_message.Message,), {
-  'DESCRIPTOR' : _MULTIDENSEVECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.MultiDenseVector)
-  })
-_sym_db.RegisterMessage(MultiDenseVector)
-
-VectorInput = _reflection.GeneratedProtocolMessageType('VectorInput', (_message.Message,), {
-  'DESCRIPTOR' : _VECTORINPUT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.VectorInput)
-  })
-_sym_db.RegisterMessage(VectorInput)
-
-ShardKeySelector = _reflection.GeneratedProtocolMessageType('ShardKeySelector', (_message.Message,), {
-  'DESCRIPTOR' : _SHARDKEYSELECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ShardKeySelector)
-  })
-_sym_db.RegisterMessage(ShardKeySelector)
-
-UpsertPoints = _reflection.GeneratedProtocolMessageType('UpsertPoints', (_message.Message,), {
-  'DESCRIPTOR' : _UPSERTPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.UpsertPoints)
-  })
-_sym_db.RegisterMessage(UpsertPoints)
-
-DeletePoints = _reflection.GeneratedProtocolMessageType('DeletePoints', (_message.Message,), {
-  'DESCRIPTOR' : _DELETEPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DeletePoints)
-  })
-_sym_db.RegisterMessage(DeletePoints)
-
-GetPoints = _reflection.GeneratedProtocolMessageType('GetPoints', (_message.Message,), {
-  'DESCRIPTOR' : _GETPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GetPoints)
-  })
-_sym_db.RegisterMessage(GetPoints)
-
-UpdatePointVectors = _reflection.GeneratedProtocolMessageType('UpdatePointVectors', (_message.Message,), {
-  'DESCRIPTOR' : _UPDATEPOINTVECTORS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.UpdatePointVectors)
-  })
-_sym_db.RegisterMessage(UpdatePointVectors)
-
-PointVectors = _reflection.GeneratedProtocolMessageType('PointVectors', (_message.Message,), {
-  'DESCRIPTOR' : _POINTVECTORS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PointVectors)
-  })
-_sym_db.RegisterMessage(PointVectors)
-
-DeletePointVectors = _reflection.GeneratedProtocolMessageType('DeletePointVectors', (_message.Message,), {
-  'DESCRIPTOR' : _DELETEPOINTVECTORS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DeletePointVectors)
-  })
-_sym_db.RegisterMessage(DeletePointVectors)
-
-SetPayloadPoints = _reflection.GeneratedProtocolMessageType('SetPayloadPoints', (_message.Message,), {
-
-  'PayloadEntry' : _reflection.GeneratedProtocolMessageType('PayloadEntry', (_message.Message,), {
-    'DESCRIPTOR' : _SETPAYLOADPOINTS_PAYLOADENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.SetPayloadPoints.PayloadEntry)
-    })
-  ,
-  'DESCRIPTOR' : _SETPAYLOADPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SetPayloadPoints)
-  })
-_sym_db.RegisterMessage(SetPayloadPoints)
-_sym_db.RegisterMessage(SetPayloadPoints.PayloadEntry)
-
-DeletePayloadPoints = _reflection.GeneratedProtocolMessageType('DeletePayloadPoints', (_message.Message,), {
-  'DESCRIPTOR' : _DELETEPAYLOADPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DeletePayloadPoints)
-  })
-_sym_db.RegisterMessage(DeletePayloadPoints)
-
-ClearPayloadPoints = _reflection.GeneratedProtocolMessageType('ClearPayloadPoints', (_message.Message,), {
-  'DESCRIPTOR' : _CLEARPAYLOADPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ClearPayloadPoints)
-  })
-_sym_db.RegisterMessage(ClearPayloadPoints)
-
-CreateFieldIndexCollection = _reflection.GeneratedProtocolMessageType('CreateFieldIndexCollection', (_message.Message,), {
-  'DESCRIPTOR' : _CREATEFIELDINDEXCOLLECTION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CreateFieldIndexCollection)
-  })
-_sym_db.RegisterMessage(CreateFieldIndexCollection)
-
-DeleteFieldIndexCollection = _reflection.GeneratedProtocolMessageType('DeleteFieldIndexCollection', (_message.Message,), {
-  'DESCRIPTOR' : _DELETEFIELDINDEXCOLLECTION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DeleteFieldIndexCollection)
-  })
-_sym_db.RegisterMessage(DeleteFieldIndexCollection)
-
-PayloadIncludeSelector = _reflection.GeneratedProtocolMessageType('PayloadIncludeSelector', (_message.Message,), {
-  'DESCRIPTOR' : _PAYLOADINCLUDESELECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PayloadIncludeSelector)
-  })
-_sym_db.RegisterMessage(PayloadIncludeSelector)
-
-PayloadExcludeSelector = _reflection.GeneratedProtocolMessageType('PayloadExcludeSelector', (_message.Message,), {
-  'DESCRIPTOR' : _PAYLOADEXCLUDESELECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PayloadExcludeSelector)
-  })
-_sym_db.RegisterMessage(PayloadExcludeSelector)
-
-WithPayloadSelector = _reflection.GeneratedProtocolMessageType('WithPayloadSelector', (_message.Message,), {
-  'DESCRIPTOR' : _WITHPAYLOADSELECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.WithPayloadSelector)
-  })
-_sym_db.RegisterMessage(WithPayloadSelector)
-
-NamedVectors = _reflection.GeneratedProtocolMessageType('NamedVectors', (_message.Message,), {
-
-  'VectorsEntry' : _reflection.GeneratedProtocolMessageType('VectorsEntry', (_message.Message,), {
-    'DESCRIPTOR' : _NAMEDVECTORS_VECTORSENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.NamedVectors.VectorsEntry)
-    })
-  ,
-  'DESCRIPTOR' : _NAMEDVECTORS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.NamedVectors)
-  })
-_sym_db.RegisterMessage(NamedVectors)
-_sym_db.RegisterMessage(NamedVectors.VectorsEntry)
-
-NamedVectorsOutput = _reflection.GeneratedProtocolMessageType('NamedVectorsOutput', (_message.Message,), {
-
-  'VectorsEntry' : _reflection.GeneratedProtocolMessageType('VectorsEntry', (_message.Message,), {
-    'DESCRIPTOR' : _NAMEDVECTORSOUTPUT_VECTORSENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.NamedVectorsOutput.VectorsEntry)
-    })
-  ,
-  'DESCRIPTOR' : _NAMEDVECTORSOUTPUT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.NamedVectorsOutput)
-  })
-_sym_db.RegisterMessage(NamedVectorsOutput)
-_sym_db.RegisterMessage(NamedVectorsOutput.VectorsEntry)
-
-Vectors = _reflection.GeneratedProtocolMessageType('Vectors', (_message.Message,), {
-  'DESCRIPTOR' : _VECTORS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Vectors)
-  })
-_sym_db.RegisterMessage(Vectors)
-
-VectorsOutput = _reflection.GeneratedProtocolMessageType('VectorsOutput', (_message.Message,), {
-  'DESCRIPTOR' : _VECTORSOUTPUT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.VectorsOutput)
-  })
-_sym_db.RegisterMessage(VectorsOutput)
-
-VectorsSelector = _reflection.GeneratedProtocolMessageType('VectorsSelector', (_message.Message,), {
-  'DESCRIPTOR' : _VECTORSSELECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.VectorsSelector)
-  })
-_sym_db.RegisterMessage(VectorsSelector)
-
-WithVectorsSelector = _reflection.GeneratedProtocolMessageType('WithVectorsSelector', (_message.Message,), {
-  'DESCRIPTOR' : _WITHVECTORSSELECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.WithVectorsSelector)
-  })
-_sym_db.RegisterMessage(WithVectorsSelector)
-
-QuantizationSearchParams = _reflection.GeneratedProtocolMessageType('QuantizationSearchParams', (_message.Message,), {
-  'DESCRIPTOR' : _QUANTIZATIONSEARCHPARAMS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.QuantizationSearchParams)
-  })
-_sym_db.RegisterMessage(QuantizationSearchParams)
-
-SearchParams = _reflection.GeneratedProtocolMessageType('SearchParams', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHPARAMS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchParams)
-  })
-_sym_db.RegisterMessage(SearchParams)
-
-SearchPoints = _reflection.GeneratedProtocolMessageType('SearchPoints', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchPoints)
-  })
-_sym_db.RegisterMessage(SearchPoints)
-
-SearchBatchPoints = _reflection.GeneratedProtocolMessageType('SearchBatchPoints', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHBATCHPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchBatchPoints)
-  })
-_sym_db.RegisterMessage(SearchBatchPoints)
-
-WithLookup = _reflection.GeneratedProtocolMessageType('WithLookup', (_message.Message,), {
-  'DESCRIPTOR' : _WITHLOOKUP,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.WithLookup)
-  })
-_sym_db.RegisterMessage(WithLookup)
-
-SearchPointGroups = _reflection.GeneratedProtocolMessageType('SearchPointGroups', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHPOINTGROUPS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchPointGroups)
-  })
-_sym_db.RegisterMessage(SearchPointGroups)
-
-StartFrom = _reflection.GeneratedProtocolMessageType('StartFrom', (_message.Message,), {
-  'DESCRIPTOR' : _STARTFROM,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.StartFrom)
-  })
-_sym_db.RegisterMessage(StartFrom)
-
-OrderBy = _reflection.GeneratedProtocolMessageType('OrderBy', (_message.Message,), {
-  'DESCRIPTOR' : _ORDERBY,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.OrderBy)
-  })
-_sym_db.RegisterMessage(OrderBy)
-
-ScrollPoints = _reflection.GeneratedProtocolMessageType('ScrollPoints', (_message.Message,), {
-  'DESCRIPTOR' : _SCROLLPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ScrollPoints)
-  })
-_sym_db.RegisterMessage(ScrollPoints)
-
-LookupLocation = _reflection.GeneratedProtocolMessageType('LookupLocation', (_message.Message,), {
-  'DESCRIPTOR' : _LOOKUPLOCATION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.LookupLocation)
-  })
-_sym_db.RegisterMessage(LookupLocation)
-
-RecommendPoints = _reflection.GeneratedProtocolMessageType('RecommendPoints', (_message.Message,), {
-  'DESCRIPTOR' : _RECOMMENDPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RecommendPoints)
-  })
-_sym_db.RegisterMessage(RecommendPoints)
-
-RecommendBatchPoints = _reflection.GeneratedProtocolMessageType('RecommendBatchPoints', (_message.Message,), {
-  'DESCRIPTOR' : _RECOMMENDBATCHPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RecommendBatchPoints)
-  })
-_sym_db.RegisterMessage(RecommendBatchPoints)
-
-RecommendPointGroups = _reflection.GeneratedProtocolMessageType('RecommendPointGroups', (_message.Message,), {
-  'DESCRIPTOR' : _RECOMMENDPOINTGROUPS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RecommendPointGroups)
-  })
-_sym_db.RegisterMessage(RecommendPointGroups)
-
-TargetVector = _reflection.GeneratedProtocolMessageType('TargetVector', (_message.Message,), {
-  'DESCRIPTOR' : _TARGETVECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.TargetVector)
-  })
-_sym_db.RegisterMessage(TargetVector)
-
-VectorExample = _reflection.GeneratedProtocolMessageType('VectorExample', (_message.Message,), {
-  'DESCRIPTOR' : _VECTOREXAMPLE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.VectorExample)
-  })
-_sym_db.RegisterMessage(VectorExample)
-
-ContextExamplePair = _reflection.GeneratedProtocolMessageType('ContextExamplePair', (_message.Message,), {
-  'DESCRIPTOR' : _CONTEXTEXAMPLEPAIR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ContextExamplePair)
-  })
-_sym_db.RegisterMessage(ContextExamplePair)
-
-DiscoverPoints = _reflection.GeneratedProtocolMessageType('DiscoverPoints', (_message.Message,), {
-  'DESCRIPTOR' : _DISCOVERPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DiscoverPoints)
-  })
-_sym_db.RegisterMessage(DiscoverPoints)
-
-DiscoverBatchPoints = _reflection.GeneratedProtocolMessageType('DiscoverBatchPoints', (_message.Message,), {
-  'DESCRIPTOR' : _DISCOVERBATCHPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DiscoverBatchPoints)
-  })
-_sym_db.RegisterMessage(DiscoverBatchPoints)
-
-CountPoints = _reflection.GeneratedProtocolMessageType('CountPoints', (_message.Message,), {
-  'DESCRIPTOR' : _COUNTPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CountPoints)
-  })
-_sym_db.RegisterMessage(CountPoints)
-
-RecommendInput = _reflection.GeneratedProtocolMessageType('RecommendInput', (_message.Message,), {
-  'DESCRIPTOR' : _RECOMMENDINPUT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RecommendInput)
-  })
-_sym_db.RegisterMessage(RecommendInput)
-
-ContextInputPair = _reflection.GeneratedProtocolMessageType('ContextInputPair', (_message.Message,), {
-  'DESCRIPTOR' : _CONTEXTINPUTPAIR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ContextInputPair)
-  })
-_sym_db.RegisterMessage(ContextInputPair)
-
-DiscoverInput = _reflection.GeneratedProtocolMessageType('DiscoverInput', (_message.Message,), {
-  'DESCRIPTOR' : _DISCOVERINPUT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DiscoverInput)
-  })
-_sym_db.RegisterMessage(DiscoverInput)
-
-ContextInput = _reflection.GeneratedProtocolMessageType('ContextInput', (_message.Message,), {
-  'DESCRIPTOR' : _CONTEXTINPUT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ContextInput)
-  })
-_sym_db.RegisterMessage(ContextInput)
-
-Formula = _reflection.GeneratedProtocolMessageType('Formula', (_message.Message,), {
-
-  'DefaultsEntry' : _reflection.GeneratedProtocolMessageType('DefaultsEntry', (_message.Message,), {
-    'DESCRIPTOR' : _FORMULA_DEFAULTSENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.Formula.DefaultsEntry)
-    })
-  ,
-  'DESCRIPTOR' : _FORMULA,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Formula)
-  })
-_sym_db.RegisterMessage(Formula)
-_sym_db.RegisterMessage(Formula.DefaultsEntry)
-
-Expression = _reflection.GeneratedProtocolMessageType('Expression', (_message.Message,), {
-  'DESCRIPTOR' : _EXPRESSION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Expression)
-  })
-_sym_db.RegisterMessage(Expression)
-
-GeoDistance = _reflection.GeneratedProtocolMessageType('GeoDistance', (_message.Message,), {
-  'DESCRIPTOR' : _GEODISTANCE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GeoDistance)
-  })
-_sym_db.RegisterMessage(GeoDistance)
-
-MultExpression = _reflection.GeneratedProtocolMessageType('MultExpression', (_message.Message,), {
-  'DESCRIPTOR' : _MULTEXPRESSION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.MultExpression)
-  })
-_sym_db.RegisterMessage(MultExpression)
-
-SumExpression = _reflection.GeneratedProtocolMessageType('SumExpression', (_message.Message,), {
-  'DESCRIPTOR' : _SUMEXPRESSION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SumExpression)
-  })
-_sym_db.RegisterMessage(SumExpression)
-
-DivExpression = _reflection.GeneratedProtocolMessageType('DivExpression', (_message.Message,), {
-  'DESCRIPTOR' : _DIVEXPRESSION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DivExpression)
-  })
-_sym_db.RegisterMessage(DivExpression)
-
-PowExpression = _reflection.GeneratedProtocolMessageType('PowExpression', (_message.Message,), {
-  'DESCRIPTOR' : _POWEXPRESSION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PowExpression)
-  })
-_sym_db.RegisterMessage(PowExpression)
-
-DecayParamsExpression = _reflection.GeneratedProtocolMessageType('DecayParamsExpression', (_message.Message,), {
-  'DESCRIPTOR' : _DECAYPARAMSEXPRESSION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DecayParamsExpression)
-  })
-_sym_db.RegisterMessage(DecayParamsExpression)
-
-NearestInputWithMmr = _reflection.GeneratedProtocolMessageType('NearestInputWithMmr', (_message.Message,), {
-  'DESCRIPTOR' : _NEARESTINPUTWITHMMR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.NearestInputWithMmr)
-  })
-_sym_db.RegisterMessage(NearestInputWithMmr)
-
-Mmr = _reflection.GeneratedProtocolMessageType('Mmr', (_message.Message,), {
-  'DESCRIPTOR' : _MMR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Mmr)
-  })
-_sym_db.RegisterMessage(Mmr)
-
-Query = _reflection.GeneratedProtocolMessageType('Query', (_message.Message,), {
-  'DESCRIPTOR' : _QUERY,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Query)
-  })
-_sym_db.RegisterMessage(Query)
-
-PrefetchQuery = _reflection.GeneratedProtocolMessageType('PrefetchQuery', (_message.Message,), {
-  'DESCRIPTOR' : _PREFETCHQUERY,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PrefetchQuery)
-  })
-_sym_db.RegisterMessage(PrefetchQuery)
-
-QueryPoints = _reflection.GeneratedProtocolMessageType('QueryPoints', (_message.Message,), {
-  'DESCRIPTOR' : _QUERYPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.QueryPoints)
-  })
-_sym_db.RegisterMessage(QueryPoints)
-
-QueryBatchPoints = _reflection.GeneratedProtocolMessageType('QueryBatchPoints', (_message.Message,), {
-  'DESCRIPTOR' : _QUERYBATCHPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.QueryBatchPoints)
-  })
-_sym_db.RegisterMessage(QueryBatchPoints)
-
-QueryPointGroups = _reflection.GeneratedProtocolMessageType('QueryPointGroups', (_message.Message,), {
-  'DESCRIPTOR' : _QUERYPOINTGROUPS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.QueryPointGroups)
-  })
-_sym_db.RegisterMessage(QueryPointGroups)
-
-FacetCounts = _reflection.GeneratedProtocolMessageType('FacetCounts', (_message.Message,), {
-  'DESCRIPTOR' : _FACETCOUNTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.FacetCounts)
-  })
-_sym_db.RegisterMessage(FacetCounts)
-
-FacetValue = _reflection.GeneratedProtocolMessageType('FacetValue', (_message.Message,), {
-  'DESCRIPTOR' : _FACETVALUE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.FacetValue)
-  })
-_sym_db.RegisterMessage(FacetValue)
-
-FacetHit = _reflection.GeneratedProtocolMessageType('FacetHit', (_message.Message,), {
-  'DESCRIPTOR' : _FACETHIT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.FacetHit)
-  })
-_sym_db.RegisterMessage(FacetHit)
-
-SearchMatrixPoints = _reflection.GeneratedProtocolMessageType('SearchMatrixPoints', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHMATRIXPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchMatrixPoints)
-  })
-_sym_db.RegisterMessage(SearchMatrixPoints)
-
-SearchMatrixPairs = _reflection.GeneratedProtocolMessageType('SearchMatrixPairs', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHMATRIXPAIRS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchMatrixPairs)
-  })
-_sym_db.RegisterMessage(SearchMatrixPairs)
-
-SearchMatrixPair = _reflection.GeneratedProtocolMessageType('SearchMatrixPair', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHMATRIXPAIR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchMatrixPair)
-  })
-_sym_db.RegisterMessage(SearchMatrixPair)
-
-SearchMatrixOffsets = _reflection.GeneratedProtocolMessageType('SearchMatrixOffsets', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHMATRIXOFFSETS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchMatrixOffsets)
-  })
-_sym_db.RegisterMessage(SearchMatrixOffsets)
-
-PointsUpdateOperation = _reflection.GeneratedProtocolMessageType('PointsUpdateOperation', (_message.Message,), {
-
-  'PointStructList' : _reflection.GeneratedProtocolMessageType('PointStructList', (_message.Message,), {
-    'DESCRIPTOR' : _POINTSUPDATEOPERATION_POINTSTRUCTLIST,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation.PointStructList)
-    })
-  ,
-
-  'SetPayload' : _reflection.GeneratedProtocolMessageType('SetPayload', (_message.Message,), {
-
-    'PayloadEntry' : _reflection.GeneratedProtocolMessageType('PayloadEntry', (_message.Message,), {
-      'DESCRIPTOR' : _POINTSUPDATEOPERATION_SETPAYLOAD_PAYLOADENTRY,
-      '__module__' : 'points_pb2'
-      # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation.SetPayload.PayloadEntry)
-      })
-    ,
-    'DESCRIPTOR' : _POINTSUPDATEOPERATION_SETPAYLOAD,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation.SetPayload)
-    })
-  ,
-
-  'OverwritePayload' : _reflection.GeneratedProtocolMessageType('OverwritePayload', (_message.Message,), {
-
-    'PayloadEntry' : _reflection.GeneratedProtocolMessageType('PayloadEntry', (_message.Message,), {
-      'DESCRIPTOR' : _POINTSUPDATEOPERATION_OVERWRITEPAYLOAD_PAYLOADENTRY,
-      '__module__' : 'points_pb2'
-      # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation.OverwritePayload.PayloadEntry)
-      })
-    ,
-    'DESCRIPTOR' : _POINTSUPDATEOPERATION_OVERWRITEPAYLOAD,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation.OverwritePayload)
-    })
-  ,
-
-  'DeletePayload' : _reflection.GeneratedProtocolMessageType('DeletePayload', (_message.Message,), {
-    'DESCRIPTOR' : _POINTSUPDATEOPERATION_DELETEPAYLOAD,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation.DeletePayload)
-    })
-  ,
-
-  'UpdateVectors' : _reflection.GeneratedProtocolMessageType('UpdateVectors', (_message.Message,), {
-    'DESCRIPTOR' : _POINTSUPDATEOPERATION_UPDATEVECTORS,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation.UpdateVectors)
-    })
-  ,
-
-  'DeleteVectors' : _reflection.GeneratedProtocolMessageType('DeleteVectors', (_message.Message,), {
-    'DESCRIPTOR' : _POINTSUPDATEOPERATION_DELETEVECTORS,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation.DeleteVectors)
-    })
-  ,
-
-  'DeletePoints' : _reflection.GeneratedProtocolMessageType('DeletePoints', (_message.Message,), {
-    'DESCRIPTOR' : _POINTSUPDATEOPERATION_DELETEPOINTS,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation.DeletePoints)
-    })
-  ,
-
-  'ClearPayload' : _reflection.GeneratedProtocolMessageType('ClearPayload', (_message.Message,), {
-    'DESCRIPTOR' : _POINTSUPDATEOPERATION_CLEARPAYLOAD,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation.ClearPayload)
-    })
-  ,
-  'DESCRIPTOR' : _POINTSUPDATEOPERATION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PointsUpdateOperation)
-  })
-_sym_db.RegisterMessage(PointsUpdateOperation)
-_sym_db.RegisterMessage(PointsUpdateOperation.PointStructList)
-_sym_db.RegisterMessage(PointsUpdateOperation.SetPayload)
-_sym_db.RegisterMessage(PointsUpdateOperation.SetPayload.PayloadEntry)
-_sym_db.RegisterMessage(PointsUpdateOperation.OverwritePayload)
-_sym_db.RegisterMessage(PointsUpdateOperation.OverwritePayload.PayloadEntry)
-_sym_db.RegisterMessage(PointsUpdateOperation.DeletePayload)
-_sym_db.RegisterMessage(PointsUpdateOperation.UpdateVectors)
-_sym_db.RegisterMessage(PointsUpdateOperation.DeleteVectors)
-_sym_db.RegisterMessage(PointsUpdateOperation.DeletePoints)
-_sym_db.RegisterMessage(PointsUpdateOperation.ClearPayload)
-
-UpdateBatchPoints = _reflection.GeneratedProtocolMessageType('UpdateBatchPoints', (_message.Message,), {
-  'DESCRIPTOR' : _UPDATEBATCHPOINTS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.UpdateBatchPoints)
-  })
-_sym_db.RegisterMessage(UpdateBatchPoints)
-
-PointsOperationResponse = _reflection.GeneratedProtocolMessageType('PointsOperationResponse', (_message.Message,), {
-  'DESCRIPTOR' : _POINTSOPERATIONRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PointsOperationResponse)
-  })
-_sym_db.RegisterMessage(PointsOperationResponse)
-
-UpdateResult = _reflection.GeneratedProtocolMessageType('UpdateResult', (_message.Message,), {
-  'DESCRIPTOR' : _UPDATERESULT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.UpdateResult)
-  })
-_sym_db.RegisterMessage(UpdateResult)
-
-OrderValue = _reflection.GeneratedProtocolMessageType('OrderValue', (_message.Message,), {
-  'DESCRIPTOR' : _ORDERVALUE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.OrderValue)
-  })
-_sym_db.RegisterMessage(OrderValue)
-
-ScoredPoint = _reflection.GeneratedProtocolMessageType('ScoredPoint', (_message.Message,), {
-
-  'PayloadEntry' : _reflection.GeneratedProtocolMessageType('PayloadEntry', (_message.Message,), {
-    'DESCRIPTOR' : _SCOREDPOINT_PAYLOADENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.ScoredPoint.PayloadEntry)
-    })
-  ,
-  'DESCRIPTOR' : _SCOREDPOINT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ScoredPoint)
-  })
-_sym_db.RegisterMessage(ScoredPoint)
-_sym_db.RegisterMessage(ScoredPoint.PayloadEntry)
-
-GroupId = _reflection.GeneratedProtocolMessageType('GroupId', (_message.Message,), {
-  'DESCRIPTOR' : _GROUPID,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GroupId)
-  })
-_sym_db.RegisterMessage(GroupId)
-
-PointGroup = _reflection.GeneratedProtocolMessageType('PointGroup', (_message.Message,), {
-  'DESCRIPTOR' : _POINTGROUP,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PointGroup)
-  })
-_sym_db.RegisterMessage(PointGroup)
-
-GroupsResult = _reflection.GeneratedProtocolMessageType('GroupsResult', (_message.Message,), {
-  'DESCRIPTOR' : _GROUPSRESULT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GroupsResult)
-  })
-_sym_db.RegisterMessage(GroupsResult)
-
-SearchResponse = _reflection.GeneratedProtocolMessageType('SearchResponse', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchResponse)
-  })
-_sym_db.RegisterMessage(SearchResponse)
-
-QueryResponse = _reflection.GeneratedProtocolMessageType('QueryResponse', (_message.Message,), {
-  'DESCRIPTOR' : _QUERYRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.QueryResponse)
-  })
-_sym_db.RegisterMessage(QueryResponse)
-
-QueryBatchResponse = _reflection.GeneratedProtocolMessageType('QueryBatchResponse', (_message.Message,), {
-  'DESCRIPTOR' : _QUERYBATCHRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.QueryBatchResponse)
-  })
-_sym_db.RegisterMessage(QueryBatchResponse)
-
-QueryGroupsResponse = _reflection.GeneratedProtocolMessageType('QueryGroupsResponse', (_message.Message,), {
-  'DESCRIPTOR' : _QUERYGROUPSRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.QueryGroupsResponse)
-  })
-_sym_db.RegisterMessage(QueryGroupsResponse)
-
-BatchResult = _reflection.GeneratedProtocolMessageType('BatchResult', (_message.Message,), {
-  'DESCRIPTOR' : _BATCHRESULT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.BatchResult)
-  })
-_sym_db.RegisterMessage(BatchResult)
-
-SearchBatchResponse = _reflection.GeneratedProtocolMessageType('SearchBatchResponse', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHBATCHRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchBatchResponse)
-  })
-_sym_db.RegisterMessage(SearchBatchResponse)
-
-SearchGroupsResponse = _reflection.GeneratedProtocolMessageType('SearchGroupsResponse', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHGROUPSRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchGroupsResponse)
-  })
-_sym_db.RegisterMessage(SearchGroupsResponse)
-
-CountResponse = _reflection.GeneratedProtocolMessageType('CountResponse', (_message.Message,), {
-  'DESCRIPTOR' : _COUNTRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CountResponse)
-  })
-_sym_db.RegisterMessage(CountResponse)
-
-ScrollResponse = _reflection.GeneratedProtocolMessageType('ScrollResponse', (_message.Message,), {
-  'DESCRIPTOR' : _SCROLLRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ScrollResponse)
-  })
-_sym_db.RegisterMessage(ScrollResponse)
-
-CountResult = _reflection.GeneratedProtocolMessageType('CountResult', (_message.Message,), {
-  'DESCRIPTOR' : _COUNTRESULT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.CountResult)
-  })
-_sym_db.RegisterMessage(CountResult)
-
-RetrievedPoint = _reflection.GeneratedProtocolMessageType('RetrievedPoint', (_message.Message,), {
-
-  'PayloadEntry' : _reflection.GeneratedProtocolMessageType('PayloadEntry', (_message.Message,), {
-    'DESCRIPTOR' : _RETRIEVEDPOINT_PAYLOADENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.RetrievedPoint.PayloadEntry)
-    })
-  ,
-  'DESCRIPTOR' : _RETRIEVEDPOINT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RetrievedPoint)
-  })
-_sym_db.RegisterMessage(RetrievedPoint)
-_sym_db.RegisterMessage(RetrievedPoint.PayloadEntry)
-
-GetResponse = _reflection.GeneratedProtocolMessageType('GetResponse', (_message.Message,), {
-  'DESCRIPTOR' : _GETRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GetResponse)
-  })
-_sym_db.RegisterMessage(GetResponse)
-
-RecommendResponse = _reflection.GeneratedProtocolMessageType('RecommendResponse', (_message.Message,), {
-  'DESCRIPTOR' : _RECOMMENDRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RecommendResponse)
-  })
-_sym_db.RegisterMessage(RecommendResponse)
-
-RecommendBatchResponse = _reflection.GeneratedProtocolMessageType('RecommendBatchResponse', (_message.Message,), {
-  'DESCRIPTOR' : _RECOMMENDBATCHRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RecommendBatchResponse)
-  })
-_sym_db.RegisterMessage(RecommendBatchResponse)
-
-DiscoverResponse = _reflection.GeneratedProtocolMessageType('DiscoverResponse', (_message.Message,), {
-  'DESCRIPTOR' : _DISCOVERRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DiscoverResponse)
-  })
-_sym_db.RegisterMessage(DiscoverResponse)
-
-DiscoverBatchResponse = _reflection.GeneratedProtocolMessageType('DiscoverBatchResponse', (_message.Message,), {
-  'DESCRIPTOR' : _DISCOVERBATCHRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DiscoverBatchResponse)
-  })
-_sym_db.RegisterMessage(DiscoverBatchResponse)
-
-RecommendGroupsResponse = _reflection.GeneratedProtocolMessageType('RecommendGroupsResponse', (_message.Message,), {
-  'DESCRIPTOR' : _RECOMMENDGROUPSRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RecommendGroupsResponse)
-  })
-_sym_db.RegisterMessage(RecommendGroupsResponse)
-
-UpdateBatchResponse = _reflection.GeneratedProtocolMessageType('UpdateBatchResponse', (_message.Message,), {
-  'DESCRIPTOR' : _UPDATEBATCHRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.UpdateBatchResponse)
-  })
-_sym_db.RegisterMessage(UpdateBatchResponse)
-
-FacetResponse = _reflection.GeneratedProtocolMessageType('FacetResponse', (_message.Message,), {
-  'DESCRIPTOR' : _FACETRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.FacetResponse)
-  })
-_sym_db.RegisterMessage(FacetResponse)
-
-SearchMatrixPairsResponse = _reflection.GeneratedProtocolMessageType('SearchMatrixPairsResponse', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHMATRIXPAIRSRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchMatrixPairsResponse)
-  })
-_sym_db.RegisterMessage(SearchMatrixPairsResponse)
-
-SearchMatrixOffsetsResponse = _reflection.GeneratedProtocolMessageType('SearchMatrixOffsetsResponse', (_message.Message,), {
-  'DESCRIPTOR' : _SEARCHMATRIXOFFSETSRESPONSE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.SearchMatrixOffsetsResponse)
-  })
-_sym_db.RegisterMessage(SearchMatrixOffsetsResponse)
-
-Filter = _reflection.GeneratedProtocolMessageType('Filter', (_message.Message,), {
-  'DESCRIPTOR' : _FILTER,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Filter)
-  })
-_sym_db.RegisterMessage(Filter)
-
-MinShould = _reflection.GeneratedProtocolMessageType('MinShould', (_message.Message,), {
-  'DESCRIPTOR' : _MINSHOULD,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.MinShould)
-  })
-_sym_db.RegisterMessage(MinShould)
-
-Condition = _reflection.GeneratedProtocolMessageType('Condition', (_message.Message,), {
-  'DESCRIPTOR' : _CONDITION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Condition)
-  })
-_sym_db.RegisterMessage(Condition)
-
-IsEmptyCondition = _reflection.GeneratedProtocolMessageType('IsEmptyCondition', (_message.Message,), {
-  'DESCRIPTOR' : _ISEMPTYCONDITION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.IsEmptyCondition)
-  })
-_sym_db.RegisterMessage(IsEmptyCondition)
-
-IsNullCondition = _reflection.GeneratedProtocolMessageType('IsNullCondition', (_message.Message,), {
-  'DESCRIPTOR' : _ISNULLCONDITION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.IsNullCondition)
-  })
-_sym_db.RegisterMessage(IsNullCondition)
-
-HasIdCondition = _reflection.GeneratedProtocolMessageType('HasIdCondition', (_message.Message,), {
-  'DESCRIPTOR' : _HASIDCONDITION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.HasIdCondition)
-  })
-_sym_db.RegisterMessage(HasIdCondition)
-
-HasVectorCondition = _reflection.GeneratedProtocolMessageType('HasVectorCondition', (_message.Message,), {
-  'DESCRIPTOR' : _HASVECTORCONDITION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.HasVectorCondition)
-  })
-_sym_db.RegisterMessage(HasVectorCondition)
-
-NestedCondition = _reflection.GeneratedProtocolMessageType('NestedCondition', (_message.Message,), {
-  'DESCRIPTOR' : _NESTEDCONDITION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.NestedCondition)
-  })
-_sym_db.RegisterMessage(NestedCondition)
-
-FieldCondition = _reflection.GeneratedProtocolMessageType('FieldCondition', (_message.Message,), {
-  'DESCRIPTOR' : _FIELDCONDITION,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.FieldCondition)
-  })
-_sym_db.RegisterMessage(FieldCondition)
-
-Match = _reflection.GeneratedProtocolMessageType('Match', (_message.Message,), {
-  'DESCRIPTOR' : _MATCH,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Match)
-  })
-_sym_db.RegisterMessage(Match)
-
-RepeatedStrings = _reflection.GeneratedProtocolMessageType('RepeatedStrings', (_message.Message,), {
-  'DESCRIPTOR' : _REPEATEDSTRINGS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RepeatedStrings)
-  })
-_sym_db.RegisterMessage(RepeatedStrings)
-
-RepeatedIntegers = _reflection.GeneratedProtocolMessageType('RepeatedIntegers', (_message.Message,), {
-  'DESCRIPTOR' : _REPEATEDINTEGERS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.RepeatedIntegers)
-  })
-_sym_db.RegisterMessage(RepeatedIntegers)
-
-Range = _reflection.GeneratedProtocolMessageType('Range', (_message.Message,), {
-  'DESCRIPTOR' : _RANGE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Range)
-  })
-_sym_db.RegisterMessage(Range)
-
-DatetimeRange = _reflection.GeneratedProtocolMessageType('DatetimeRange', (_message.Message,), {
-  'DESCRIPTOR' : _DATETIMERANGE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.DatetimeRange)
-  })
-_sym_db.RegisterMessage(DatetimeRange)
-
-GeoBoundingBox = _reflection.GeneratedProtocolMessageType('GeoBoundingBox', (_message.Message,), {
-  'DESCRIPTOR' : _GEOBOUNDINGBOX,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GeoBoundingBox)
-  })
-_sym_db.RegisterMessage(GeoBoundingBox)
-
-GeoRadius = _reflection.GeneratedProtocolMessageType('GeoRadius', (_message.Message,), {
-  'DESCRIPTOR' : _GEORADIUS,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GeoRadius)
-  })
-_sym_db.RegisterMessage(GeoRadius)
-
-GeoLineString = _reflection.GeneratedProtocolMessageType('GeoLineString', (_message.Message,), {
-  'DESCRIPTOR' : _GEOLINESTRING,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GeoLineString)
-  })
-_sym_db.RegisterMessage(GeoLineString)
-
-GeoPolygon = _reflection.GeneratedProtocolMessageType('GeoPolygon', (_message.Message,), {
-  'DESCRIPTOR' : _GEOPOLYGON,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GeoPolygon)
-  })
-_sym_db.RegisterMessage(GeoPolygon)
-
-ValuesCount = _reflection.GeneratedProtocolMessageType('ValuesCount', (_message.Message,), {
-  'DESCRIPTOR' : _VALUESCOUNT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ValuesCount)
-  })
-_sym_db.RegisterMessage(ValuesCount)
-
-PointsSelector = _reflection.GeneratedProtocolMessageType('PointsSelector', (_message.Message,), {
-  'DESCRIPTOR' : _POINTSSELECTOR,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PointsSelector)
-  })
-_sym_db.RegisterMessage(PointsSelector)
-
-PointsIdsList = _reflection.GeneratedProtocolMessageType('PointsIdsList', (_message.Message,), {
-  'DESCRIPTOR' : _POINTSIDSLIST,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PointsIdsList)
-  })
-_sym_db.RegisterMessage(PointsIdsList)
-
-PointStruct = _reflection.GeneratedProtocolMessageType('PointStruct', (_message.Message,), {
-
-  'PayloadEntry' : _reflection.GeneratedProtocolMessageType('PayloadEntry', (_message.Message,), {
-    'DESCRIPTOR' : _POINTSTRUCT_PAYLOADENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.PointStruct.PayloadEntry)
-    })
-  ,
-  'DESCRIPTOR' : _POINTSTRUCT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.PointStruct)
-  })
-_sym_db.RegisterMessage(PointStruct)
-_sym_db.RegisterMessage(PointStruct.PayloadEntry)
-
-GeoPoint = _reflection.GeneratedProtocolMessageType('GeoPoint', (_message.Message,), {
-  'DESCRIPTOR' : _GEOPOINT,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.GeoPoint)
-  })
-_sym_db.RegisterMessage(GeoPoint)
-
-Usage = _reflection.GeneratedProtocolMessageType('Usage', (_message.Message,), {
-  'DESCRIPTOR' : _USAGE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.Usage)
-  })
-_sym_db.RegisterMessage(Usage)
-
-InferenceUsage = _reflection.GeneratedProtocolMessageType('InferenceUsage', (_message.Message,), {
-
-  'ModelsEntry' : _reflection.GeneratedProtocolMessageType('ModelsEntry', (_message.Message,), {
-    'DESCRIPTOR' : _INFERENCEUSAGE_MODELSENTRY,
-    '__module__' : 'points_pb2'
-    # @@protoc_insertion_point(class_scope:qdrant.InferenceUsage.ModelsEntry)
-    })
-  ,
-  'DESCRIPTOR' : _INFERENCEUSAGE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.InferenceUsage)
-  })
-_sym_db.RegisterMessage(InferenceUsage)
-_sym_db.RegisterMessage(InferenceUsage.ModelsEntry)
-
-ModelUsage = _reflection.GeneratedProtocolMessageType('ModelUsage', (_message.Message,), {
-  'DESCRIPTOR' : _MODELUSAGE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.ModelUsage)
-  })
-_sym_db.RegisterMessage(ModelUsage)
-
-HardwareUsage = _reflection.GeneratedProtocolMessageType('HardwareUsage', (_message.Message,), {
-  'DESCRIPTOR' : _HARDWAREUSAGE,
-  '__module__' : 'points_pb2'
-  # @@protoc_insertion_point(class_scope:qdrant.HardwareUsage)
-  })
-_sym_db.RegisterMessage(HardwareUsage)
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0cpoints.proto\x12\x06qdrant\x1a\x11\x63ollections.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x13json_with_int.proto\"8\n\rWriteOrdering\x12\'\n\x04type\x18\x01 \x01(\x0e\x32\x19.qdrant.WriteOrderingType\"Y\n\x0fReadConsistency\x12+\n\x04type\x18\x01 \x01(\x0e\x32\x1b.qdrant.ReadConsistencyTypeH\x00\x12\x10\n\x06\x66\x61\x63tor\x18\x02 \x01(\x04H\x00\x42\x07\n\x05value\"<\n\x07PointId\x12\r\n\x03num\x18\x01 \x01(\x04H\x00\x12\x0e\n\x04uuid\x18\x02 \x01(\tH\x00\x42\x12\n\x10point_id_options\"\x1d\n\rSparseIndices\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\r\"\x96\x01\n\x08\x44ocument\x12\x0c\n\x04text\x18\x01 \x01(\t\x12\r\n\x05model\x18\x03 \x01(\t\x12.\n\x07options\x18\x04 \x03(\x0b\x32\x1d.qdrant.Document.OptionsEntry\x1a=\n\x0cOptionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\"\xa0\x01\n\x05Image\x12\x1c\n\x05image\x18\x01 \x01(\x0b\x32\r.qdrant.Value\x12\r\n\x05model\x18\x02 \x01(\t\x12+\n\x07options\x18\x03 \x03(\x0b\x32\x1a.qdrant.Image.OptionsEntry\x1a=\n\x0cOptionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\"\xb5\x01\n\x0fInferenceObject\x12\x1d\n\x06object\x18\x01 \x01(\x0b\x32\r.qdrant.Value\x12\r\n\x05model\x18\x02 \x01(\t\x12\x35\n\x07options\x18\x03 \x03(\x0b\x32$.qdrant.InferenceObject.OptionsEntry\x1a=\n\x0cOptionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\"\x83\x03\n\x06Vector\x12\x10\n\x04\x64\x61ta\x18\x01 
\x03(\x02\x42\x02\x18\x01\x12/\n\x07indices\x18\x02 \x01(\x0b\x32\x15.qdrant.SparseIndicesB\x02\x18\x01H\x01\x88\x01\x01\x12\x1e\n\rvectors_count\x18\x03 \x01(\rB\x02\x18\x01H\x02\x88\x01\x01\x12$\n\x05\x64\x65nse\x18\x65 \x01(\x0b\x32\x13.qdrant.DenseVectorH\x00\x12&\n\x06sparse\x18\x66 \x01(\x0b\x32\x14.qdrant.SparseVectorH\x00\x12/\n\x0bmulti_dense\x18g \x01(\x0b\x32\x18.qdrant.MultiDenseVectorH\x00\x12$\n\x08\x64ocument\x18h \x01(\x0b\x32\x10.qdrant.DocumentH\x00\x12\x1e\n\x05image\x18i \x01(\x0b\x32\r.qdrant.ImageH\x00\x12)\n\x06object\x18j \x01(\x0b\x32\x17.qdrant.InferenceObjectH\x00\x42\x08\n\x06vectorB\n\n\x08_indicesB\x10\n\x0e_vectors_count\"\x98\x02\n\x0cVectorOutput\x12\x10\n\x04\x64\x61ta\x18\x01 \x03(\x02\x42\x02\x18\x01\x12/\n\x07indices\x18\x02 \x01(\x0b\x32\x15.qdrant.SparseIndicesB\x02\x18\x01H\x01\x88\x01\x01\x12\x1e\n\rvectors_count\x18\x03 \x01(\rB\x02\x18\x01H\x02\x88\x01\x01\x12$\n\x05\x64\x65nse\x18\x65 \x01(\x0b\x32\x13.qdrant.DenseVectorH\x00\x12&\n\x06sparse\x18\x66 \x01(\x0b\x32\x14.qdrant.SparseVectorH\x00\x12/\n\x0bmulti_dense\x18g \x01(\x0b\x32\x18.qdrant.MultiDenseVectorH\x00\x42\x08\n\x06vectorB\n\n\x08_indicesB\x10\n\x0e_vectors_count\"\x1b\n\x0b\x44\x65nseVector\x12\x0c\n\x04\x64\x61ta\x18\x01 \x03(\x02\"/\n\x0cSparseVector\x12\x0e\n\x06values\x18\x01 \x03(\x02\x12\x0f\n\x07indices\x18\x02 \x03(\r\"8\n\x10MultiDenseVector\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.qdrant.DenseVector\"\xa7\x02\n\x0bVectorInput\x12\x1d\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointIdH\x00\x12$\n\x05\x64\x65nse\x18\x02 \x01(\x0b\x32\x13.qdrant.DenseVectorH\x00\x12&\n\x06sparse\x18\x03 \x01(\x0b\x32\x14.qdrant.SparseVectorH\x00\x12/\n\x0bmulti_dense\x18\x04 \x01(\x0b\x32\x18.qdrant.MultiDenseVectorH\x00\x12$\n\x08\x64ocument\x18\x05 \x01(\x0b\x32\x10.qdrant.DocumentH\x00\x12\x1e\n\x05image\x18\x06 \x01(\x0b\x32\r.qdrant.ImageH\x00\x12)\n\x06object\x18\x07 \x01(\x0b\x32\x17.qdrant.InferenceObjectH\x00\x42\t\n\x07variant\"n\n\x10ShardKeySelector\x12$\n\nshard_keys\x18\x01 \x03(\x0b\x32\x10.qdrant.ShardKey\x12\'\n\x08\x66\x61llback\x18\x02 \x01(\x0b\x32\x10.qdrant.ShardKeyH\x00\x88\x01\x01\x42\x0b\n\t_fallback\"\xb3\x02\n\x0cUpsertPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12#\n\x06points\x18\x03 \x03(\x0b\x32\x13.qdrant.PointStruct\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x05 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x12*\n\rupdate_filter\x18\x06 \x01(\x0b\x32\x0e.qdrant.FilterH\x03\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_orderingB\x15\n\x13_shard_key_selectorB\x10\n\x0e_update_filter\"\xf8\x01\n\x0c\x44\x65letePoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12&\n\x06points\x18\x03 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x05 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_orderingB\x15\n\x13_shard_key_selector\"\x85\x03\n\tGetPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x1c\n\x03ids\x18\x02 \x03(\x0b\x32\x0f.qdrant.PointId\x12\x31\n\x0cwith_payload\x18\x04 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12\x36\n\x0cwith_vectors\x18\x05 \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x00\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x06 
\x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x07 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x12\x14\n\x07timeout\x18\x08 \x01(\x04H\x03\x88\x01\x01\x42\x0f\n\r_with_vectorsB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selectorB\n\n\x08_timeoutJ\x04\x08\x03\x10\x04\"\xba\x02\n\x12UpdatePointVectors\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12$\n\x06points\x18\x03 \x03(\x0b\x32\x14.qdrant.PointVectors\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x05 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x12*\n\rupdate_filter\x18\x06 \x01(\x0b\x32\x0e.qdrant.FilterH\x03\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_orderingB\x15\n\x13_shard_key_selectorB\x10\n\x0e_update_filter\"M\n\x0cPointVectors\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointId\x12 \n\x07vectors\x18\x02 \x01(\x0b\x32\x0f.qdrant.Vectors\"\xb1\x02\n\x12\x44\x65letePointVectors\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12/\n\x0fpoints_selector\x18\x03 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12(\n\x07vectors\x18\x04 \x01(\x0b\x32\x17.qdrant.VectorsSelector\x12,\n\x08ordering\x18\x05 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x06 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_orderingB\x15\n\x13_shard_key_selector\"\xb5\x03\n\x10SetPayloadPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x36\n\x07payload\x18\x03 \x03(\x0b\x32%.qdrant.SetPayloadPoints.PayloadEntry\x12\x34\n\x0fpoints_selector\x18\x05 \x01(\x0b\x32\x16.qdrant.PointsSelectorH\x01\x88\x01\x01\x12,\n\x08ordering\x18\x06 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x02\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x07 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x03\x88\x01\x01\x12\x10\n\x03key\x18\x08 \x01(\tH\x04\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\x07\n\x05_waitB\x12\n\x10_points_selectorB\x0b\n\t_orderingB\x15\n\x13_shard_key_selectorB\x06\n\x04_keyJ\x04\x08\x04\x10\x05\"\xb5\x02\n\x13\x44\x65letePayloadPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x0c\n\x04keys\x18\x03 \x03(\t\x12\x34\n\x0fpoints_selector\x18\x05 \x01(\x0b\x32\x16.qdrant.PointsSelectorH\x01\x88\x01\x01\x12,\n\x08ordering\x18\x06 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x02\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x07 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x03\x88\x01\x01\x42\x07\n\x05_waitB\x12\n\x10_points_selectorB\x0b\n\t_orderingB\x15\n\x13_shard_key_selectorJ\x04\x08\x04\x10\x05\"\xfe\x01\n\x12\x43learPayloadPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12&\n\x06points\x18\x03 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x05 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_orderingB\x15\n\x13_shard_key_selector\"\xaf\x02\n\x1a\x43reateFieldIndexCollection\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x12\n\nfield_name\x18\x03 
\x01(\t\x12*\n\nfield_type\x18\x04 \x01(\x0e\x32\x11.qdrant.FieldTypeH\x01\x88\x01\x01\x12;\n\x12\x66ield_index_params\x18\x05 \x01(\x0b\x32\x1a.qdrant.PayloadIndexParamsH\x02\x88\x01\x01\x12,\n\x08ordering\x18\x06 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x03\x88\x01\x01\x42\x07\n\x05_waitB\r\n\x0b_field_typeB\x15\n\x13_field_index_paramsB\x0b\n\t_ordering\"\xa0\x01\n\x1a\x44\x65leteFieldIndexCollection\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x12\n\nfield_name\x18\x03 \x01(\t\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_ordering\"(\n\x16PayloadIncludeSelector\x12\x0e\n\x06\x66ields\x18\x01 \x03(\t\"(\n\x16PayloadExcludeSelector\x12\x0e\n\x06\x66ields\x18\x01 \x03(\t\"\xa1\x01\n\x13WithPayloadSelector\x12\x10\n\x06\x65nable\x18\x01 \x01(\x08H\x00\x12\x31\n\x07include\x18\x02 \x01(\x0b\x32\x1e.qdrant.PayloadIncludeSelectorH\x00\x12\x31\n\x07\x65xclude\x18\x03 \x01(\x0b\x32\x1e.qdrant.PayloadExcludeSelectorH\x00\x42\x12\n\x10selector_options\"\x82\x01\n\x0cNamedVectors\x12\x32\n\x07vectors\x18\x01 \x03(\x0b\x32!.qdrant.NamedVectors.VectorsEntry\x1a>\n\x0cVectorsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1d\n\x05value\x18\x02 \x01(\x0b\x32\x0e.qdrant.Vector:\x02\x38\x01\"\x94\x01\n\x12NamedVectorsOutput\x12\x38\n\x07vectors\x18\x01 \x03(\x0b\x32\'.qdrant.NamedVectorsOutput.VectorsEntry\x1a\x44\n\x0cVectorsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.qdrant.VectorOutput:\x02\x38\x01\"g\n\x07Vectors\x12 \n\x06vector\x18\x01 \x01(\x0b\x32\x0e.qdrant.VectorH\x00\x12\'\n\x07vectors\x18\x02 \x01(\x0b\x32\x14.qdrant.NamedVectorsH\x00\x42\x11\n\x0fvectors_options\"y\n\rVectorsOutput\x12&\n\x06vector\x18\x01 \x01(\x0b\x32\x14.qdrant.VectorOutputH\x00\x12-\n\x07vectors\x18\x02 \x01(\x0b\x32\x1a.qdrant.NamedVectorsOutputH\x00\x42\x11\n\x0fvectors_options\" \n\x0fVectorsSelector\x12\r\n\x05names\x18\x01 \x03(\t\"g\n\x13WithVectorsSelector\x12\x10\n\x06\x65nable\x18\x01 \x01(\x08H\x00\x12*\n\x07include\x18\x02 \x01(\x0b\x32\x17.qdrant.VectorsSelectorH\x00\x42\x12\n\x10selector_options\"\x88\x01\n\x18QuantizationSearchParams\x12\x13\n\x06ignore\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x14\n\x07rescore\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12\x19\n\x0coversampling\x18\x03 \x01(\x01H\x02\x88\x01\x01\x42\t\n\x07_ignoreB\n\n\x08_rescoreB\x0f\n\r_oversampling\"e\n\x11\x41\x63ornSearchParams\x12\x13\n\x06\x65nable\x18\x01 \x01(\x08H\x00\x88\x01\x01\x12\x1c\n\x0fmax_selectivity\x18\x02 \x01(\x01H\x01\x88\x01\x01\x42\t\n\x07_enableB\x12\n\x10_max_selectivity\"\x81\x02\n\x0cSearchParams\x12\x14\n\x07hnsw_ef\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x12\n\x05\x65xact\x18\x02 \x01(\x08H\x01\x88\x01\x01\x12;\n\x0cquantization\x18\x03 \x01(\x0b\x32 .qdrant.QuantizationSearchParamsH\x02\x88\x01\x01\x12\x19\n\x0cindexed_only\x18\x04 \x01(\x08H\x03\x88\x01\x01\x12-\n\x05\x61\x63orn\x18\x05 \x01(\x0b\x32\x19.qdrant.AcornSearchParamsH\x04\x88\x01\x01\x42\n\n\x08_hnsw_efB\x08\n\x06_exactB\x0f\n\r_quantizationB\x0f\n\r_indexed_onlyB\x08\n\x06_acorn\"\x92\x05\n\x0cSearchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x0e\n\x06vector\x18\x02 \x03(\x02\x12\x1e\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x0e.qdrant.Filter\x12\r\n\x05limit\x18\x04 \x01(\x04\x12\x31\n\x0cwith_payload\x18\x06 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12$\n\x06params\x18\x07 \x01(\x0b\x32\x14.qdrant.SearchParams\x12\x1c\n\x0fscore_threshold\x18\x08 
\x01(\x02H\x00\x88\x01\x01\x12\x13\n\x06offset\x18\t \x01(\x04H\x01\x88\x01\x01\x12\x18\n\x0bvector_name\x18\n \x01(\tH\x02\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\x0b \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x03\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x0c \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x04\x88\x01\x01\x12\x14\n\x07timeout\x18\r \x01(\x04H\x05\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x0e \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x06\x88\x01\x01\x12\x32\n\x0esparse_indices\x18\x0f \x01(\x0b\x32\x15.qdrant.SparseIndicesH\x07\x88\x01\x01\x42\x12\n\x10_score_thresholdB\t\n\x07_offsetB\x0e\n\x0c_vector_nameB\x0f\n\r_with_vectorsB\x13\n\x11_read_consistencyB\n\n\x08_timeoutB\x15\n\x13_shard_key_selectorB\x11\n\x0f_sparse_indicesJ\x04\x08\x05\x10\x06\"\xc8\x01\n\x11SearchBatchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12+\n\rsearch_points\x18\x02 \x03(\x0b\x32\x14.qdrant.SearchPoints\x12\x36\n\x10read_consistency\x18\x03 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x00\x88\x01\x01\x12\x14\n\x07timeout\x18\x04 \x01(\x04H\x01\x88\x01\x01\x42\x13\n\x11_read_consistencyB\n\n\x08_timeout\"\xb2\x01\n\nWithLookup\x12\x12\n\ncollection\x18\x01 \x01(\t\x12\x36\n\x0cwith_payload\x18\x02 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelectorH\x00\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\x03 \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x01\x88\x01\x01\x42\x0f\n\r_with_payloadB\x0f\n\r_with_vectors\"\xd5\x05\n\x11SearchPointGroups\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x0e\n\x06vector\x18\x02 \x03(\x02\x12\x1e\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x0e.qdrant.Filter\x12\r\n\x05limit\x18\x04 \x01(\r\x12\x31\n\x0cwith_payload\x18\x05 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12$\n\x06params\x18\x06 \x01(\x0b\x32\x14.qdrant.SearchParams\x12\x1c\n\x0fscore_threshold\x18\x07 \x01(\x02H\x00\x88\x01\x01\x12\x18\n\x0bvector_name\x18\x08 \x01(\tH\x01\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\t \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x02\x88\x01\x01\x12\x10\n\x08group_by\x18\n \x01(\t\x12\x12\n\ngroup_size\x18\x0b \x01(\r\x12\x36\n\x10read_consistency\x18\x0c \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x03\x88\x01\x01\x12,\n\x0bwith_lookup\x18\r \x01(\x0b\x32\x12.qdrant.WithLookupH\x04\x88\x01\x01\x12\x14\n\x07timeout\x18\x0e \x01(\x04H\x05\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x0f \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x06\x88\x01\x01\x12\x32\n\x0esparse_indices\x18\x10 \x01(\x0b\x32\x15.qdrant.SparseIndicesH\x07\x88\x01\x01\x42\x12\n\x10_score_thresholdB\x0e\n\x0c_vector_nameB\x0f\n\r_with_vectorsB\x13\n\x11_read_consistencyB\x0e\n\x0c_with_lookupB\n\n\x08_timeoutB\x15\n\x13_shard_key_selectorB\x11\n\x0f_sparse_indices\"}\n\tStartFrom\x12\x0f\n\x05\x66loat\x18\x01 \x01(\x01H\x00\x12\x11\n\x07integer\x18\x02 \x01(\x03H\x00\x12/\n\ttimestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12\x12\n\x08\x64\x61tetime\x18\x04 \x01(\tH\x00\x42\x07\n\x05value\"\x8a\x01\n\x07OrderBy\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\tdirection\x18\x02 \x01(\x0e\x32\x11.qdrant.DirectionH\x00\x88\x01\x01\x12*\n\nstart_from\x18\x03 \x01(\x0b\x32\x11.qdrant.StartFromH\x01\x88\x01\x01\x42\x0c\n\n_directionB\r\n\x0b_start_from\"\x8e\x04\n\x0cScrollPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x1e\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0e.qdrant.Filter\x12$\n\x06offset\x18\x03 \x01(\x0b\x32\x0f.qdrant.PointIdH\x00\x88\x01\x01\x12\x12\n\x05limit\x18\x04 \x01(\rH\x01\x88\x01\x01\x12\x31\n\x0cwith_payload\x18\x06 
\x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12\x36\n\x0cwith_vectors\x18\x07 \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x02\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x08 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x03\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\t \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x04\x88\x01\x01\x12&\n\x08order_by\x18\n \x01(\x0b\x32\x0f.qdrant.OrderByH\x05\x88\x01\x01\x12\x14\n\x07timeout\x18\x0b \x01(\x04H\x06\x88\x01\x01\x42\t\n\x07_offsetB\x08\n\x06_limitB\x0f\n\r_with_vectorsB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selectorB\x0b\n\t_order_byB\n\n\x08_timeoutJ\x04\x08\x05\x10\x06\"\xa5\x01\n\x0eLookupLocation\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x18\n\x0bvector_name\x18\x02 \x01(\tH\x00\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x03 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x01\x88\x01\x01\x42\x0e\n\x0c_vector_nameB\x15\n\x13_shard_key_selector\"\xcd\x06\n\x0fRecommendPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12!\n\x08positive\x18\x02 \x03(\x0b\x32\x0f.qdrant.PointId\x12!\n\x08negative\x18\x03 \x03(\x0b\x32\x0f.qdrant.PointId\x12\x1e\n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x0e.qdrant.Filter\x12\r\n\x05limit\x18\x05 \x01(\x04\x12\x31\n\x0cwith_payload\x18\x07 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12$\n\x06params\x18\x08 \x01(\x0b\x32\x14.qdrant.SearchParams\x12\x1c\n\x0fscore_threshold\x18\t \x01(\x02H\x00\x88\x01\x01\x12\x13\n\x06offset\x18\n \x01(\x04H\x01\x88\x01\x01\x12\x12\n\x05using\x18\x0b \x01(\tH\x02\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\x0c \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x03\x88\x01\x01\x12\x30\n\x0blookup_from\x18\r \x01(\x0b\x32\x16.qdrant.LookupLocationH\x04\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x0e \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x05\x88\x01\x01\x12\x30\n\x08strategy\x18\x10 \x01(\x0e\x32\x19.qdrant.RecommendStrategyH\x06\x88\x01\x01\x12(\n\x10positive_vectors\x18\x11 \x03(\x0b\x32\x0e.qdrant.Vector\x12(\n\x10negative_vectors\x18\x12 \x03(\x0b\x32\x0e.qdrant.Vector\x12\x14\n\x07timeout\x18\x13 \x01(\x04H\x07\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x14 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x08\x88\x01\x01\x42\x12\n\x10_score_thresholdB\t\n\x07_offsetB\x08\n\x06_usingB\x0f\n\r_with_vectorsB\x0e\n\x0c_lookup_fromB\x13\n\x11_read_consistencyB\x0b\n\t_strategyB\n\n\x08_timeoutB\x15\n\x13_shard_key_selectorJ\x04\x08\x06\x10\x07\"\xd1\x01\n\x14RecommendBatchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x31\n\x10recommend_points\x18\x02 \x03(\x0b\x32\x17.qdrant.RecommendPoints\x12\x36\n\x10read_consistency\x18\x03 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x00\x88\x01\x01\x12\x14\n\x07timeout\x18\x04 \x01(\x04H\x01\x88\x01\x01\x42\x13\n\x11_read_consistencyB\n\n\x08_timeout\"\x90\x07\n\x14RecommendPointGroups\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12!\n\x08positive\x18\x02 \x03(\x0b\x32\x0f.qdrant.PointId\x12!\n\x08negative\x18\x03 \x03(\x0b\x32\x0f.qdrant.PointId\x12\x1e\n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x0e.qdrant.Filter\x12\r\n\x05limit\x18\x05 \x01(\r\x12\x31\n\x0cwith_payload\x18\x06 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12$\n\x06params\x18\x07 \x01(\x0b\x32\x14.qdrant.SearchParams\x12\x1c\n\x0fscore_threshold\x18\x08 \x01(\x02H\x00\x88\x01\x01\x12\x12\n\x05using\x18\t \x01(\tH\x01\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\n \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x02\x88\x01\x01\x12\x30\n\x0blookup_from\x18\x0b \x01(\x0b\x32\x16.qdrant.LookupLocationH\x03\x88\x01\x01\x12\x10\n\x08group_by\x18\x0c 
\x01(\t\x12\x12\n\ngroup_size\x18\r \x01(\r\x12\x36\n\x10read_consistency\x18\x0e \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x04\x88\x01\x01\x12,\n\x0bwith_lookup\x18\x0f \x01(\x0b\x32\x12.qdrant.WithLookupH\x05\x88\x01\x01\x12\x30\n\x08strategy\x18\x11 \x01(\x0e\x32\x19.qdrant.RecommendStrategyH\x06\x88\x01\x01\x12(\n\x10positive_vectors\x18\x12 \x03(\x0b\x32\x0e.qdrant.Vector\x12(\n\x10negative_vectors\x18\x13 \x03(\x0b\x32\x0e.qdrant.Vector\x12\x14\n\x07timeout\x18\x14 \x01(\x04H\x07\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x15 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x08\x88\x01\x01\x42\x12\n\x10_score_thresholdB\x08\n\x06_usingB\x0f\n\r_with_vectorsB\x0e\n\x0c_lookup_fromB\x13\n\x11_read_consistencyB\x0e\n\x0c_with_lookupB\x0b\n\t_strategyB\n\n\x08_timeoutB\x15\n\x13_shard_key_selector\"A\n\x0cTargetVector\x12\'\n\x06single\x18\x01 \x01(\x0b\x32\x15.qdrant.VectorExampleH\x00\x42\x08\n\x06target\"[\n\rVectorExample\x12\x1d\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointIdH\x00\x12 \n\x06vector\x18\x02 \x01(\x0b\x32\x0e.qdrant.VectorH\x00\x42\t\n\x07\x65xample\"f\n\x12\x43ontextExamplePair\x12\'\n\x08positive\x18\x01 \x01(\x0b\x32\x15.qdrant.VectorExample\x12\'\n\x08negative\x18\x02 \x01(\x0b\x32\x15.qdrant.VectorExample\"\x8e\x05\n\x0e\x44iscoverPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12$\n\x06target\x18\x02 \x01(\x0b\x32\x14.qdrant.TargetVector\x12+\n\x07\x63ontext\x18\x03 \x03(\x0b\x32\x1a.qdrant.ContextExamplePair\x12\x1e\n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x0e.qdrant.Filter\x12\r\n\x05limit\x18\x05 \x01(\x04\x12\x31\n\x0cwith_payload\x18\x06 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12$\n\x06params\x18\x07 \x01(\x0b\x32\x14.qdrant.SearchParams\x12\x13\n\x06offset\x18\x08 \x01(\x04H\x00\x88\x01\x01\x12\x12\n\x05using\x18\t \x01(\tH\x01\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\n \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x02\x88\x01\x01\x12\x30\n\x0blookup_from\x18\x0b \x01(\x0b\x32\x16.qdrant.LookupLocationH\x03\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x0c \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x04\x88\x01\x01\x12\x14\n\x07timeout\x18\r \x01(\x04H\x05\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x0e \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x06\x88\x01\x01\x42\t\n\x07_offsetB\x08\n\x06_usingB\x0f\n\r_with_vectorsB\x0e\n\x0c_lookup_fromB\x13\n\x11_read_consistencyB\n\n\x08_timeoutB\x15\n\x13_shard_key_selector\"\xce\x01\n\x13\x44iscoverBatchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12/\n\x0f\x64iscover_points\x18\x02 \x03(\x0b\x32\x16.qdrant.DiscoverPoints\x12\x36\n\x10read_consistency\x18\x03 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x00\x88\x01\x01\x12\x14\n\x07timeout\x18\x04 \x01(\x04H\x01\x88\x01\x01\x42\x13\n\x11_read_consistencyB\n\n\x08_timeout\"\xa5\x02\n\x0b\x43ountPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x1e\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0e.qdrant.Filter\x12\x12\n\x05\x65xact\x18\x03 \x01(\x08H\x00\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x04 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x01\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x05 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x02\x88\x01\x01\x12\x14\n\x07timeout\x18\x06 \x01(\x04H\x03\x88\x01\x01\x42\x08\n\x06_exactB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selectorB\n\n\x08_timeout\"\x9d\x01\n\x0eRecommendInput\x12%\n\x08positive\x18\x01 \x03(\x0b\x32\x13.qdrant.VectorInput\x12%\n\x08negative\x18\x02 \x03(\x0b\x32\x13.qdrant.VectorInput\x12\x30\n\x08strategy\x18\x03 
\x01(\x0e\x32\x19.qdrant.RecommendStrategyH\x00\x88\x01\x01\x42\x0b\n\t_strategy\"`\n\x10\x43ontextInputPair\x12%\n\x08positive\x18\x01 \x01(\x0b\x32\x13.qdrant.VectorInput\x12%\n\x08negative\x18\x02 \x01(\x0b\x32\x13.qdrant.VectorInput\"[\n\rDiscoverInput\x12#\n\x06target\x18\x01 \x01(\x0b\x32\x13.qdrant.VectorInput\x12%\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x14.qdrant.ContextInput\"7\n\x0c\x43ontextInput\x12\'\n\x05pairs\x18\x01 \x03(\x0b\x32\x18.qdrant.ContextInputPair\"\xa2\x01\n\x07\x46ormula\x12&\n\nexpression\x18\x01 \x01(\x0b\x32\x12.qdrant.Expression\x12/\n\x08\x64\x65\x66\x61ults\x18\x02 \x03(\x0b\x32\x1d.qdrant.Formula.DefaultsEntry\x1a>\n\rDefaultsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\"\xcc\x05\n\nExpression\x12\x12\n\x08\x63onstant\x18\x01 \x01(\x02H\x00\x12\x12\n\x08variable\x18\x02 \x01(\tH\x00\x12&\n\tcondition\x18\x03 \x01(\x0b\x32\x11.qdrant.ConditionH\x00\x12+\n\x0cgeo_distance\x18\x04 \x01(\x0b\x32\x13.qdrant.GeoDistanceH\x00\x12\x12\n\x08\x64\x61tetime\x18\x05 \x01(\tH\x00\x12\x16\n\x0c\x64\x61tetime_key\x18\x06 \x01(\tH\x00\x12&\n\x04mult\x18\x07 \x01(\x0b\x32\x16.qdrant.MultExpressionH\x00\x12$\n\x03sum\x18\x08 \x01(\x0b\x32\x15.qdrant.SumExpressionH\x00\x12$\n\x03\x64iv\x18\t \x01(\x0b\x32\x15.qdrant.DivExpressionH\x00\x12!\n\x03neg\x18\n \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12!\n\x03\x61\x62s\x18\x0b \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12\"\n\x04sqrt\x18\x0c \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12$\n\x03pow\x18\r \x01(\x0b\x32\x15.qdrant.PowExpressionH\x00\x12!\n\x03\x65xp\x18\x0e \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12#\n\x05log10\x18\x0f \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12 \n\x02ln\x18\x10 \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x12\x32\n\texp_decay\x18\x11 \x01(\x0b\x32\x1d.qdrant.DecayParamsExpressionH\x00\x12\x34\n\x0bgauss_decay\x18\x12 \x01(\x0b\x32\x1d.qdrant.DecayParamsExpressionH\x00\x12\x32\n\tlin_decay\x18\x13 \x01(\x0b\x32\x1d.qdrant.DecayParamsExpressionH\x00\x42\t\n\x07variant\";\n\x0bGeoDistance\x12 \n\x06origin\x18\x01 \x01(\x0b\x32\x10.qdrant.GeoPoint\x12\n\n\x02to\x18\x02 \x01(\t\"2\n\x0eMultExpression\x12 \n\x04mult\x18\x01 \x03(\x0b\x32\x12.qdrant.Expression\"0\n\rSumExpression\x12\x1f\n\x03sum\x18\x01 \x03(\x0b\x32\x12.qdrant.Expression\"\x86\x01\n\rDivExpression\x12 \n\x04left\x18\x01 \x01(\x0b\x32\x12.qdrant.Expression\x12!\n\x05right\x18\x02 \x01(\x0b\x32\x12.qdrant.Expression\x12\x1c\n\x0f\x62y_zero_default\x18\x03 \x01(\x02H\x00\x88\x01\x01\x42\x12\n\x10_by_zero_default\"W\n\rPowExpression\x12 \n\x04\x62\x61se\x18\x01 \x01(\x0b\x32\x12.qdrant.Expression\x12$\n\x08\x65xponent\x18\x02 \x01(\x0b\x32\x12.qdrant.Expression\"\xac\x01\n\x15\x44\x65\x63\x61yParamsExpression\x12\x1d\n\x01x\x18\x01 \x01(\x0b\x32\x12.qdrant.Expression\x12\'\n\x06target\x18\x02 \x01(\x0b\x32\x12.qdrant.ExpressionH\x00\x88\x01\x01\x12\x12\n\x05scale\x18\x03 \x01(\x02H\x01\x88\x01\x01\x12\x15\n\x08midpoint\x18\x04 \x01(\x02H\x02\x88\x01\x01\x42\t\n\x07_targetB\x08\n\x06_scaleB\x0b\n\t_midpoint\"U\n\x13NearestInputWithMmr\x12$\n\x07nearest\x18\x01 \x01(\x0b\x32\x13.qdrant.VectorInput\x12\x18\n\x03mmr\x18\x02 \x01(\x0b\x32\x0b.qdrant.Mmr\"_\n\x03Mmr\x12\x16\n\tdiversity\x18\x02 \x01(\x02H\x00\x88\x01\x01\x12\x1d\n\x10\x63\x61ndidates_limit\x18\x03 \x01(\rH\x01\x88\x01\x01\x42\x0c\n\n_diversityB\x13\n\x11_candidates_limit\"\x1b\n\x03Rrf\x12\x0e\n\x01k\x18\x01 \x01(\rH\x00\x88\x01\x01\x42\x04\n\x02_k\"\x9d\x03\n\x05Query\x12&\n\x07nearest\x18\x01 
\x01(\x0b\x32\x13.qdrant.VectorInputH\x00\x12+\n\trecommend\x18\x02 \x01(\x0b\x32\x16.qdrant.RecommendInputH\x00\x12)\n\x08\x64iscover\x18\x03 \x01(\x0b\x32\x15.qdrant.DiscoverInputH\x00\x12\'\n\x07\x63ontext\x18\x04 \x01(\x0b\x32\x14.qdrant.ContextInputH\x00\x12#\n\x08order_by\x18\x05 \x01(\x0b\x32\x0f.qdrant.OrderByH\x00\x12 \n\x06\x66usion\x18\x06 \x01(\x0e\x32\x0e.qdrant.FusionH\x00\x12 \n\x06sample\x18\x07 \x01(\x0e\x32\x0e.qdrant.SampleH\x00\x12\"\n\x07\x66ormula\x18\x08 \x01(\x0b\x32\x0f.qdrant.FormulaH\x00\x12\x37\n\x10nearest_with_mmr\x18\t \x01(\x0b\x32\x1b.qdrant.NearestInputWithMmrH\x00\x12\x1a\n\x03rrf\x18\n \x01(\x0b\x32\x0b.qdrant.RrfH\x00\x42\t\n\x07variant\"\xfb\x02\n\rPrefetchQuery\x12\'\n\x08prefetch\x18\x01 \x03(\x0b\x32\x15.qdrant.PrefetchQuery\x12!\n\x05query\x18\x02 \x01(\x0b\x32\r.qdrant.QueryH\x00\x88\x01\x01\x12\x12\n\x05using\x18\x03 \x01(\tH\x01\x88\x01\x01\x12#\n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x0e.qdrant.FilterH\x02\x88\x01\x01\x12)\n\x06params\x18\x05 \x01(\x0b\x32\x14.qdrant.SearchParamsH\x03\x88\x01\x01\x12\x1c\n\x0fscore_threshold\x18\x06 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05limit\x18\x07 \x01(\x04H\x05\x88\x01\x01\x12\x30\n\x0blookup_from\x18\x08 \x01(\x0b\x32\x16.qdrant.LookupLocationH\x06\x88\x01\x01\x42\x08\n\x06_queryB\x08\n\x06_usingB\t\n\x07_filterB\t\n\x07_paramsB\x12\n\x10_score_thresholdB\x08\n\x06_limitB\x0e\n\x0c_lookup_from\"\x85\x06\n\x0bQueryPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\'\n\x08prefetch\x18\x02 \x03(\x0b\x32\x15.qdrant.PrefetchQuery\x12!\n\x05query\x18\x03 \x01(\x0b\x32\r.qdrant.QueryH\x00\x88\x01\x01\x12\x12\n\x05using\x18\x04 \x01(\tH\x01\x88\x01\x01\x12#\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x0e.qdrant.FilterH\x02\x88\x01\x01\x12)\n\x06params\x18\x06 \x01(\x0b\x32\x14.qdrant.SearchParamsH\x03\x88\x01\x01\x12\x1c\n\x0fscore_threshold\x18\x07 \x01(\x02H\x04\x88\x01\x01\x12\x12\n\x05limit\x18\x08 \x01(\x04H\x05\x88\x01\x01\x12\x13\n\x06offset\x18\t \x01(\x04H\x06\x88\x01\x01\x12\x36\n\x0cwith_vectors\x18\n \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x07\x88\x01\x01\x12\x36\n\x0cwith_payload\x18\x0b \x01(\x0b\x32\x1b.qdrant.WithPayloadSelectorH\x08\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x0c \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\t\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\r \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\n\x88\x01\x01\x12\x30\n\x0blookup_from\x18\x0e \x01(\x0b\x32\x16.qdrant.LookupLocationH\x0b\x88\x01\x01\x12\x14\n\x07timeout\x18\x0f \x01(\x04H\x0c\x88\x01\x01\x42\x08\n\x06_queryB\x08\n\x06_usingB\t\n\x07_filterB\t\n\x07_paramsB\x12\n\x10_score_thresholdB\x08\n\x06_limitB\t\n\x07_offsetB\x0f\n\r_with_vectorsB\x0f\n\r_with_payloadB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selectorB\x0e\n\x0c_lookup_fromB\n\n\x08_timeout\"\xc5\x01\n\x10QueryBatchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12)\n\x0cquery_points\x18\x02 \x03(\x0b\x32\x13.qdrant.QueryPoints\x12\x36\n\x10read_consistency\x18\x03 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x00\x88\x01\x01\x12\x14\n\x07timeout\x18\x04 \x01(\x04H\x01\x88\x01\x01\x42\x13\n\x11_read_consistencyB\n\n\x08_timeout\"\xcc\x06\n\x10QueryPointGroups\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\'\n\x08prefetch\x18\x02 \x03(\x0b\x32\x15.qdrant.PrefetchQuery\x12!\n\x05query\x18\x03 \x01(\x0b\x32\r.qdrant.QueryH\x00\x88\x01\x01\x12\x12\n\x05using\x18\x04 \x01(\tH\x01\x88\x01\x01\x12#\n\x06\x66ilter\x18\x05 \x01(\x0b\x32\x0e.qdrant.FilterH\x02\x88\x01\x01\x12)\n\x06params\x18\x06 
\x01(\x0b\x32\x14.qdrant.SearchParamsH\x03\x88\x01\x01\x12\x1c\n\x0fscore_threshold\x18\x07 \x01(\x02H\x04\x88\x01\x01\x12\x31\n\x0cwith_payload\x18\x08 \x01(\x0b\x32\x1b.qdrant.WithPayloadSelector\x12\x36\n\x0cwith_vectors\x18\t \x01(\x0b\x32\x1b.qdrant.WithVectorsSelectorH\x05\x88\x01\x01\x12\x30\n\x0blookup_from\x18\n \x01(\x0b\x32\x16.qdrant.LookupLocationH\x06\x88\x01\x01\x12\x12\n\x05limit\x18\x0b \x01(\x04H\x07\x88\x01\x01\x12\x17\n\ngroup_size\x18\x0c \x01(\x04H\x08\x88\x01\x01\x12\x10\n\x08group_by\x18\r \x01(\t\x12\x36\n\x10read_consistency\x18\x0e \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\t\x88\x01\x01\x12,\n\x0bwith_lookup\x18\x0f \x01(\x0b\x32\x12.qdrant.WithLookupH\n\x88\x01\x01\x12\x14\n\x07timeout\x18\x10 \x01(\x04H\x0b\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x11 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x0c\x88\x01\x01\x42\x08\n\x06_queryB\x08\n\x06_usingB\t\n\x07_filterB\t\n\x07_paramsB\x12\n\x10_score_thresholdB\x0f\n\r_with_vectorsB\x0e\n\x0c_lookup_fromB\x08\n\x06_limitB\r\n\x0b_group_sizeB\x13\n\x11_read_consistencyB\x0e\n\x0c_with_lookupB\n\n\x08_timeoutB\x15\n\x13_shard_key_selector\"\xe0\x02\n\x0b\x46\x61\x63\x65tCounts\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12#\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x0e.qdrant.FilterH\x00\x88\x01\x01\x12\x12\n\x05limit\x18\x04 \x01(\x04H\x01\x88\x01\x01\x12\x12\n\x05\x65xact\x18\x05 \x01(\x08H\x02\x88\x01\x01\x12\x14\n\x07timeout\x18\x06 \x01(\x04H\x03\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x07 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x04\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x08 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x05\x88\x01\x01\x42\t\n\x07_filterB\x08\n\x06_limitB\x08\n\x06_exactB\n\n\x08_timeoutB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selector\"^\n\nFacetValue\x12\x16\n\x0cstring_value\x18\x01 \x01(\tH\x00\x12\x17\n\rinteger_value\x18\x02 \x01(\x03H\x00\x12\x14\n\nbool_value\x18\x03 \x01(\x08H\x00\x42\t\n\x07variant\"<\n\x08\x46\x61\x63\x65tHit\x12!\n\x05value\x18\x01 \x01(\x0b\x32\x12.qdrant.FacetValue\x12\r\n\x05\x63ount\x18\x02 \x01(\x04\"\xfa\x02\n\x12SearchMatrixPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12#\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0e.qdrant.FilterH\x00\x88\x01\x01\x12\x13\n\x06sample\x18\x03 \x01(\x04H\x01\x88\x01\x01\x12\x12\n\x05limit\x18\x04 \x01(\x04H\x02\x88\x01\x01\x12\x12\n\x05using\x18\x05 \x01(\tH\x03\x88\x01\x01\x12\x14\n\x07timeout\x18\x06 \x01(\x04H\x04\x88\x01\x01\x12\x36\n\x10read_consistency\x18\x07 \x01(\x0b\x32\x17.qdrant.ReadConsistencyH\x05\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x08 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x06\x88\x01\x01\x42\t\n\x07_filterB\t\n\x07_sampleB\x08\n\x06_limitB\x08\n\x06_usingB\n\n\x08_timeoutB\x13\n\x11_read_consistencyB\x15\n\x13_shard_key_selector\"<\n\x11SearchMatrixPairs\x12\'\n\x05pairs\x18\x01 \x03(\x0b\x32\x18.qdrant.SearchMatrixPair\"Y\n\x10SearchMatrixPair\x12\x1a\n\x01\x61\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointId\x12\x1a\n\x01\x62\x18\x02 \x01(\x0b\x32\x0f.qdrant.PointId\x12\r\n\x05score\x18\x03 \x01(\x02\"m\n\x13SearchMatrixOffsets\x12\x13\n\x0boffsets_row\x18\x01 \x03(\x04\x12\x13\n\x0boffsets_col\x18\x02 \x03(\x04\x12\x0e\n\x06scores\x18\x03 \x03(\x02\x12\x1c\n\x03ids\x18\x04 \x03(\x0b\x32\x0f.qdrant.PointId\"\x91\x13\n\x15PointsUpdateOperation\x12?\n\x06upsert\x18\x01 \x01(\x0b\x32-.qdrant.PointsUpdateOperation.PointStructListH\x00\x12\x37\n\x11\x64\x65lete_deprecated\x18\x02 
\x01(\x0b\x32\x16.qdrant.PointsSelectorB\x02\x18\x01H\x00\x12?\n\x0bset_payload\x18\x03 \x01(\x0b\x32(.qdrant.PointsUpdateOperation.SetPayloadH\x00\x12K\n\x11overwrite_payload\x18\x04 \x01(\x0b\x32..qdrant.PointsUpdateOperation.OverwritePayloadH\x00\x12\x45\n\x0e\x64\x65lete_payload\x18\x05 \x01(\x0b\x32+.qdrant.PointsUpdateOperation.DeletePayloadH\x00\x12>\n\x18\x63lear_payload_deprecated\x18\x06 \x01(\x0b\x32\x16.qdrant.PointsSelectorB\x02\x18\x01H\x00\x12\x45\n\x0eupdate_vectors\x18\x07 \x01(\x0b\x32+.qdrant.PointsUpdateOperation.UpdateVectorsH\x00\x12\x45\n\x0e\x64\x65lete_vectors\x18\x08 \x01(\x0b\x32+.qdrant.PointsUpdateOperation.DeleteVectorsH\x00\x12\x43\n\rdelete_points\x18\t \x01(\x0b\x32*.qdrant.PointsUpdateOperation.DeletePointsH\x00\x12\x43\n\rclear_payload\x18\n \x01(\x0b\x32*.qdrant.PointsUpdateOperation.ClearPayloadH\x00\x1a\xc6\x01\n\x0fPointStructList\x12#\n\x06points\x18\x01 \x03(\x0b\x32\x13.qdrant.PointStruct\x12\x39\n\x12shard_key_selector\x18\x02 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x00\x88\x01\x01\x12*\n\rupdate_filter\x18\x03 \x01(\x0b\x32\x0e.qdrant.FilterH\x01\x88\x01\x01\x42\x15\n\x13_shard_key_selectorB\x10\n\x0e_update_filter\x1a\xc9\x02\n\nSetPayload\x12\x46\n\x07payload\x18\x01 \x03(\x0b\x32\x35.qdrant.PointsUpdateOperation.SetPayload.PayloadEntry\x12\x34\n\x0fpoints_selector\x18\x02 \x01(\x0b\x32\x16.qdrant.PointsSelectorH\x00\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x03 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x01\x88\x01\x01\x12\x10\n\x03key\x18\x04 \x01(\tH\x02\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\x12\n\x10_points_selectorB\x15\n\x13_shard_key_selectorB\x06\n\x04_key\x1a\xd5\x02\n\x10OverwritePayload\x12L\n\x07payload\x18\x01 \x03(\x0b\x32;.qdrant.PointsUpdateOperation.OverwritePayload.PayloadEntry\x12\x34\n\x0fpoints_selector\x18\x02 \x01(\x0b\x32\x16.qdrant.PointsSelectorH\x00\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x03 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x01\x88\x01\x01\x12\x10\n\x03key\x18\x04 \x01(\tH\x02\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\x12\n\x10_points_selectorB\x15\n\x13_shard_key_selectorB\x06\n\x04_key\x1a\xb9\x01\n\rDeletePayload\x12\x0c\n\x04keys\x18\x01 \x03(\t\x12\x34\n\x0fpoints_selector\x18\x02 \x01(\x0b\x32\x16.qdrant.PointsSelectorH\x00\x88\x01\x01\x12\x39\n\x12shard_key_selector\x18\x03 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x01\x88\x01\x01\x42\x12\n\x10_points_selectorB\x15\n\x13_shard_key_selector\x1a\xc5\x01\n\rUpdateVectors\x12$\n\x06points\x18\x01 \x03(\x0b\x32\x14.qdrant.PointVectors\x12\x39\n\x12shard_key_selector\x18\x02 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x00\x88\x01\x01\x12*\n\rupdate_filter\x18\x03 \x01(\x0b\x32\x0e.qdrant.FilterH\x01\x88\x01\x01\x42\x15\n\x13_shard_key_selectorB\x10\n\x0e_update_filter\x1a\xbc\x01\n\rDeleteVectors\x12/\n\x0fpoints_selector\x18\x01 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12(\n\x07vectors\x18\x02 \x01(\x0b\x32\x17.qdrant.VectorsSelector\x12\x39\n\x12shard_key_selector\x18\x03 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x00\x88\x01\x01\x42\x15\n\x13_shard_key_selector\x1a\x88\x01\n\x0c\x44\x65letePoints\x12&\n\x06points\x18\x01 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12\x39\n\x12shard_key_selector\x18\x02 
\x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x00\x88\x01\x01\x42\x15\n\x13_shard_key_selector\x1a\x88\x01\n\x0c\x43learPayload\x12&\n\x06points\x18\x01 \x01(\x0b\x32\x16.qdrant.PointsSelector\x12\x39\n\x12shard_key_selector\x18\x02 \x01(\x0b\x32\x18.qdrant.ShardKeySelectorH\x00\x88\x01\x01\x42\x15\n\x13_shard_key_selectorB\x0b\n\toperation\"\xb6\x01\n\x11UpdateBatchPoints\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x11\n\x04wait\x18\x02 \x01(\x08H\x00\x88\x01\x01\x12\x31\n\noperations\x18\x03 \x03(\x0b\x32\x1d.qdrant.PointsUpdateOperation\x12,\n\x08ordering\x18\x04 \x01(\x0b\x32\x15.qdrant.WriteOrderingH\x01\x88\x01\x01\x42\x07\n\x05_waitB\x0b\n\t_ordering\"z\n\x17PointsOperationResponse\x12$\n\x06result\x18\x01 \x01(\x0b\x32\x14.qdrant.UpdateResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"`\n\x0cUpdateResult\x12\x19\n\x0coperation_id\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12$\n\x06status\x18\x02 \x01(\x0e\x32\x14.qdrant.UpdateStatusB\x0f\n\r_operation_id\"7\n\nOrderValue\x12\r\n\x03int\x18\x01 \x01(\x03H\x00\x12\x0f\n\x05\x66loat\x18\x02 \x01(\x01H\x00\x42\t\n\x07variant\"\xf1\x02\n\x0bScoredPoint\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointId\x12\x31\n\x07payload\x18\x02 \x03(\x0b\x32 .qdrant.ScoredPoint.PayloadEntry\x12\r\n\x05score\x18\x03 \x01(\x02\x12\x0f\n\x07version\x18\x05 \x01(\x04\x12+\n\x07vectors\x18\x06 \x01(\x0b\x32\x15.qdrant.VectorsOutputH\x00\x88\x01\x01\x12(\n\tshard_key\x18\x07 \x01(\x0b\x32\x10.qdrant.ShardKeyH\x01\x88\x01\x01\x12,\n\x0border_value\x18\x08 \x01(\x0b\x32\x12.qdrant.OrderValueH\x02\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\n\n\x08_vectorsB\x0c\n\n_shard_keyB\x0e\n\x0c_order_valueJ\x04\x08\x04\x10\x05\"\\\n\x07GroupId\x12\x18\n\x0eunsigned_value\x18\x01 \x01(\x04H\x00\x12\x17\n\rinteger_value\x18\x02 \x01(\x03H\x00\x12\x16\n\x0cstring_value\x18\x03 \x01(\tH\x00\x42\x06\n\x04kind\"t\n\nPointGroup\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.GroupId\x12!\n\x04hits\x18\x02 \x03(\x0b\x32\x13.qdrant.ScoredPoint\x12&\n\x06lookup\x18\x03 \x01(\x0b\x32\x16.qdrant.RetrievedPoint\"2\n\x0cGroupsResult\x12\"\n\x06groups\x18\x01 \x03(\x0b\x32\x12.qdrant.PointGroup\"p\n\x0eSearchResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.ScoredPoint\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"o\n\rQueryResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.ScoredPoint\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"t\n\x12QueryBatchResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.BatchResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"v\n\x13QueryGroupsResponse\x12$\n\x06result\x18\x01 \x01(\x0b\x32\x14.qdrant.GroupsResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"2\n\x0b\x42\x61tchResult\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.ScoredPoint\"u\n\x13SearchBatchResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.BatchResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"w\n\x14SearchGroupsResponse\x12$\n\x06result\x18\x01 
\x01(\x0b\x32\x14.qdrant.GroupsResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"o\n\rCountResponse\x12#\n\x06result\x18\x01 \x01(\x0b\x32\x13.qdrant.CountResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"\xb8\x01\n\x0eScrollResponse\x12.\n\x10next_page_offset\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointIdH\x00\x88\x01\x01\x12&\n\x06result\x18\x02 \x03(\x0b\x32\x16.qdrant.RetrievedPoint\x12\x0c\n\x04time\x18\x03 \x01(\x01\x12!\n\x05usage\x18\x04 \x01(\x0b\x32\r.qdrant.UsageH\x01\x88\x01\x01\x42\x13\n\x11_next_page_offsetB\x08\n\x06_usage\"\x1c\n\x0b\x43ountResult\x12\r\n\x05\x63ount\x18\x01 \x01(\x04\"\xd7\x02\n\x0eRetrievedPoint\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointId\x12\x34\n\x07payload\x18\x02 \x03(\x0b\x32#.qdrant.RetrievedPoint.PayloadEntry\x12+\n\x07vectors\x18\x04 \x01(\x0b\x32\x15.qdrant.VectorsOutputH\x00\x88\x01\x01\x12(\n\tshard_key\x18\x05 \x01(\x0b\x32\x10.qdrant.ShardKeyH\x01\x88\x01\x01\x12,\n\x0border_value\x18\x06 \x01(\x0b\x32\x12.qdrant.OrderValueH\x02\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\n\n\x08_vectorsB\x0c\n\n_shard_keyB\x0e\n\x0c_order_valueJ\x04\x08\x03\x10\x04\"p\n\x0bGetResponse\x12&\n\x06result\x18\x01 \x03(\x0b\x32\x16.qdrant.RetrievedPoint\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"s\n\x11RecommendResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.ScoredPoint\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"x\n\x16RecommendBatchResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.BatchResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"r\n\x10\x44iscoverResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.ScoredPoint\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"w\n\x15\x44iscoverBatchResponse\x12#\n\x06result\x18\x01 \x03(\x0b\x32\x13.qdrant.BatchResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"z\n\x17RecommendGroupsResponse\x12$\n\x06result\x18\x01 \x01(\x0b\x32\x14.qdrant.GroupsResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"v\n\x13UpdateBatchResponse\x12$\n\x06result\x18\x01 \x03(\x0b\x32\x14.qdrant.UpdateResult\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"j\n\rFacetResponse\x12\x1e\n\x04hits\x18\x01 \x03(\x0b\x32\x10.qdrant.FacetHit\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"\x81\x01\n\x19SearchMatrixPairsResponse\x12)\n\x06result\x18\x01 \x01(\x0b\x32\x19.qdrant.SearchMatrixPairs\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 \x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"\x85\x01\n\x1bSearchMatrixOffsetsResponse\x12+\n\x06result\x18\x01 \x01(\x0b\x32\x1b.qdrant.SearchMatrixOffsets\x12\x0c\n\x04time\x18\x02 \x01(\x01\x12!\n\x05usage\x18\x03 
\x01(\x0b\x32\r.qdrant.UsageH\x00\x88\x01\x01\x42\x08\n\x06_usage\"\xac\x01\n\x06\x46ilter\x12!\n\x06should\x18\x01 \x03(\x0b\x32\x11.qdrant.Condition\x12\x1f\n\x04must\x18\x02 \x03(\x0b\x32\x11.qdrant.Condition\x12#\n\x08must_not\x18\x03 \x03(\x0b\x32\x11.qdrant.Condition\x12*\n\nmin_should\x18\x04 \x01(\x0b\x32\x11.qdrant.MinShouldH\x00\x88\x01\x01\x42\r\n\x0b_min_should\"E\n\tMinShould\x12%\n\nconditions\x18\x01 \x03(\x0b\x32\x11.qdrant.Condition\x12\x11\n\tmin_count\x18\x02 \x01(\x04\"\xcb\x02\n\tCondition\x12\'\n\x05\x66ield\x18\x01 \x01(\x0b\x32\x16.qdrant.FieldConditionH\x00\x12,\n\x08is_empty\x18\x02 \x01(\x0b\x32\x18.qdrant.IsEmptyConditionH\x00\x12(\n\x06has_id\x18\x03 \x01(\x0b\x32\x16.qdrant.HasIdConditionH\x00\x12 \n\x06\x66ilter\x18\x04 \x01(\x0b\x32\x0e.qdrant.FilterH\x00\x12*\n\x07is_null\x18\x05 \x01(\x0b\x32\x17.qdrant.IsNullConditionH\x00\x12)\n\x06nested\x18\x06 \x01(\x0b\x32\x17.qdrant.NestedConditionH\x00\x12\x30\n\nhas_vector\x18\x07 \x01(\x0b\x32\x1a.qdrant.HasVectorConditionH\x00\x42\x12\n\x10\x63ondition_one_of\"\x1f\n\x10IsEmptyCondition\x12\x0b\n\x03key\x18\x01 \x01(\t\"\x1e\n\x0fIsNullCondition\x12\x0b\n\x03key\x18\x01 \x01(\t\"1\n\x0eHasIdCondition\x12\x1f\n\x06has_id\x18\x01 \x03(\x0b\x32\x0f.qdrant.PointId\"(\n\x12HasVectorCondition\x12\x12\n\nhas_vector\x18\x01 \x01(\t\">\n\x0fNestedCondition\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1e\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0e.qdrant.Filter\"\xfb\x02\n\x0e\x46ieldCondition\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05match\x18\x02 \x01(\x0b\x32\r.qdrant.Match\x12\x1c\n\x05range\x18\x03 \x01(\x0b\x32\r.qdrant.Range\x12\x30\n\x10geo_bounding_box\x18\x04 \x01(\x0b\x32\x16.qdrant.GeoBoundingBox\x12%\n\ngeo_radius\x18\x05 \x01(\x0b\x32\x11.qdrant.GeoRadius\x12)\n\x0cvalues_count\x18\x06 \x01(\x0b\x32\x13.qdrant.ValuesCount\x12\'\n\x0bgeo_polygon\x18\x07 \x01(\x0b\x32\x12.qdrant.GeoPolygon\x12-\n\x0e\x64\x61tetime_range\x18\x08 \x01(\x0b\x32\x15.qdrant.DatetimeRange\x12\x15\n\x08is_empty\x18\t \x01(\x08H\x00\x88\x01\x01\x12\x14\n\x07is_null\x18\n \x01(\x08H\x01\x88\x01\x01\x42\x0b\n\t_is_emptyB\n\n\x08_is_null\"\xc9\x02\n\x05Match\x12\x11\n\x07keyword\x18\x01 \x01(\tH\x00\x12\x11\n\x07integer\x18\x02 \x01(\x03H\x00\x12\x11\n\x07\x62oolean\x18\x03 \x01(\x08H\x00\x12\x0e\n\x04text\x18\x04 \x01(\tH\x00\x12+\n\x08keywords\x18\x05 \x01(\x0b\x32\x17.qdrant.RepeatedStringsH\x00\x12,\n\x08integers\x18\x06 \x01(\x0b\x32\x18.qdrant.RepeatedIntegersH\x00\x12\x33\n\x0f\x65xcept_integers\x18\x07 \x01(\x0b\x32\x18.qdrant.RepeatedIntegersH\x00\x12\x32\n\x0f\x65xcept_keywords\x18\x08 \x01(\x0b\x32\x17.qdrant.RepeatedStringsH\x00\x12\x10\n\x06phrase\x18\t \x01(\tH\x00\x12\x12\n\x08text_any\x18\n \x01(\tH\x00\x42\r\n\x0bmatch_value\"\"\n\x0fRepeatedStrings\x12\x0f\n\x07strings\x18\x01 \x03(\t\"$\n\x10RepeatedIntegers\x12\x10\n\x08integers\x18\x01 \x03(\x03\"k\n\x05Range\x12\x0f\n\x02lt\x18\x01 \x01(\x01H\x00\x88\x01\x01\x12\x0f\n\x02gt\x18\x02 \x01(\x01H\x01\x88\x01\x01\x12\x10\n\x03gte\x18\x03 \x01(\x01H\x02\x88\x01\x01\x12\x10\n\x03lte\x18\x04 \x01(\x01H\x03\x88\x01\x01\x42\x05\n\x03_ltB\x05\n\x03_gtB\x06\n\x04_gteB\x06\n\x04_lte\"\xe3\x01\n\rDatetimeRange\x12+\n\x02lt\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x88\x01\x01\x12+\n\x02gt\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x01\x88\x01\x01\x12,\n\x03gte\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x02\x88\x01\x01\x12,\n\x03lte\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.TimestampH\x03\x88\x01\x01\x42\x05\n\x03_ltB\x05\n\x03_gtB\x06\n\x04_gteB\x06\n\x04_lte\"\\\n\x0eGeoBoundingBox\x12\"\n\x08top_left\x18\x01 \x01(\x0b\x32\x10.qdrant.GeoPoint\x12&\n\x0c\x62ottom_right\x18\x02 \x01(\x0b\x32\x10.qdrant.GeoPoint\"=\n\tGeoRadius\x12 \n\x06\x63\x65nter\x18\x01 \x01(\x0b\x32\x10.qdrant.GeoPoint\x12\x0e\n\x06radius\x18\x02 \x01(\x02\"1\n\rGeoLineString\x12 \n\x06points\x18\x01 \x03(\x0b\x32\x10.qdrant.GeoPoint\"_\n\nGeoPolygon\x12\'\n\x08\x65xterior\x18\x01 \x01(\x0b\x32\x15.qdrant.GeoLineString\x12(\n\tinteriors\x18\x02 \x03(\x0b\x32\x15.qdrant.GeoLineString\"q\n\x0bValuesCount\x12\x0f\n\x02lt\x18\x01 \x01(\x04H\x00\x88\x01\x01\x12\x0f\n\x02gt\x18\x02 \x01(\x04H\x01\x88\x01\x01\x12\x10\n\x03gte\x18\x03 \x01(\x04H\x02\x88\x01\x01\x12\x10\n\x03lte\x18\x04 \x01(\x04H\x03\x88\x01\x01\x42\x05\n\x03_ltB\x05\n\x03_gtB\x06\n\x04_gteB\x06\n\x04_lte\"u\n\x0ePointsSelector\x12\'\n\x06points\x18\x01 \x01(\x0b\x32\x15.qdrant.PointsIdsListH\x00\x12 \n\x06\x66ilter\x18\x02 \x01(\x0b\x32\x0e.qdrant.FilterH\x00\x42\x18\n\x16points_selector_one_of\"-\n\rPointsIdsList\x12\x1c\n\x03ids\x18\x01 \x03(\x0b\x32\x0f.qdrant.PointId\"\xd5\x01\n\x0bPointStruct\x12\x1b\n\x02id\x18\x01 \x01(\x0b\x32\x0f.qdrant.PointId\x12\x31\n\x07payload\x18\x03 \x03(\x0b\x32 .qdrant.PointStruct.PayloadEntry\x12%\n\x07vectors\x18\x04 \x01(\x0b\x32\x0f.qdrant.VectorsH\x00\x88\x01\x01\x1a=\n\x0cPayloadEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x1c\n\x05value\x18\x02 \x01(\x0b\x32\r.qdrant.Value:\x02\x38\x01\x42\n\n\x08_vectorsJ\x04\x08\x02\x10\x03\"$\n\x08GeoPoint\x12\x0b\n\x03lon\x18\x01 \x01(\x01\x12\x0b\n\x03lat\x18\x02 \x01(\x01\"\x80\x01\n\x05Usage\x12,\n\x08hardware\x18\x01 \x01(\x0b\x32\x15.qdrant.HardwareUsageH\x00\x88\x01\x01\x12.\n\tinference\x18\x02 \x01(\x0b\x32\x16.qdrant.InferenceUsageH\x01\x88\x01\x01\x42\x0b\n\t_hardwareB\x0c\n\n_inference\"\x87\x01\n\x0eInferenceUsage\x12\x32\n\x06models\x18\x01 \x03(\x0b\x32\".qdrant.InferenceUsage.ModelsEntry\x1a\x41\n\x0bModelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12!\n\x05value\x18\x02 \x01(\x0b\x32\x12.qdrant.ModelUsage:\x02\x38\x01\"\x1c\n\nModelUsage\x12\x0e\n\x06tokens\x18\x01 \x01(\x04\"\xbf\x01\n\rHardwareUsage\x12\x0b\n\x03\x63pu\x18\x01 \x01(\x04\x12\x17\n\x0fpayload_io_read\x18\x02 \x01(\x04\x12\x18\n\x10payload_io_write\x18\x03 \x01(\x04\x12\x1d\n\x15payload_index_io_read\x18\x04 \x01(\x04\x12\x1e\n\x16payload_index_io_write\x18\x05 \x01(\x04\x12\x16\n\x0evector_io_read\x18\x06 \x01(\x04\x12\x17\n\x0fvector_io_write\x18\x07 
\x01(\x04*5\n\x11WriteOrderingType\x12\x08\n\x04Weak\x10\x00\x12\n\n\x06Medium\x10\x01\x12\n\n\x06Strong\x10\x02*8\n\x13ReadConsistencyType\x12\x07\n\x03\x41ll\x10\x00\x12\x0c\n\x08Majority\x10\x01\x12\n\n\x06Quorum\x10\x02*\xad\x01\n\tFieldType\x12\x14\n\x10\x46ieldTypeKeyword\x10\x00\x12\x14\n\x10\x46ieldTypeInteger\x10\x01\x12\x12\n\x0e\x46ieldTypeFloat\x10\x02\x12\x10\n\x0c\x46ieldTypeGeo\x10\x03\x12\x11\n\rFieldTypeText\x10\x04\x12\x11\n\rFieldTypeBool\x10\x05\x12\x15\n\x11\x46ieldTypeDatetime\x10\x06\x12\x11\n\rFieldTypeUuid\x10\x07*\x1e\n\tDirection\x12\x07\n\x03\x41sc\x10\x00\x12\x08\n\x04\x44\x65sc\x10\x01*D\n\x11RecommendStrategy\x12\x11\n\rAverageVector\x10\x00\x12\r\n\tBestScore\x10\x01\x12\r\n\tSumScores\x10\x02*\x1b\n\x06\x46usion\x12\x07\n\x03RRF\x10\x00\x12\x08\n\x04\x44\x42SF\x10\x01*\x14\n\x06Sample\x12\n\n\x06Random\x10\x00*[\n\x0cUpdateStatus\x12\x17\n\x13UnknownUpdateStatus\x10\x00\x12\x10\n\x0c\x41\x63knowledged\x10\x01\x12\r\n\tCompleted\x10\x02\x12\x11\n\rClockRejected\x10\x03\x42\x15\xaa\x02\x12Qdrant.Client.Grpcb\x06proto3')
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'points_pb2', _globals)
 if _descriptor._USE_C_DESCRIPTORS == False:
-
-  DESCRIPTOR._options = None
-  DESCRIPTOR._serialized_options = b'\252\002\022Qdrant.Client.Grpc'
-  _DOCUMENT_OPTIONSENTRY._options = None
-  _DOCUMENT_OPTIONSENTRY._serialized_options = b'8\001'
-  _IMAGE_OPTIONSENTRY._options = None
-  _IMAGE_OPTIONSENTRY._serialized_options = b'8\001'
-  _INFERENCEOBJECT_OPTIONSENTRY._options = None
-  _INFERENCEOBJECT_OPTIONSENTRY._serialized_options = b'8\001'
-  _SETPAYLOADPOINTS_PAYLOADENTRY._options = None
-  _SETPAYLOADPOINTS_PAYLOADENTRY._serialized_options = b'8\001'
-  _NAMEDVECTORS_VECTORSENTRY._options = None
-  _NAMEDVECTORS_VECTORSENTRY._serialized_options = b'8\001'
-  _NAMEDVECTORSOUTPUT_VECTORSENTRY._options = None
-  _NAMEDVECTORSOUTPUT_VECTORSENTRY._serialized_options = b'8\001'
-  _FORMULA_DEFAULTSENTRY._options = None
-  _FORMULA_DEFAULTSENTRY._serialized_options = b'8\001'
-  _POINTSUPDATEOPERATION_SETPAYLOAD_PAYLOADENTRY._options = None
-  _POINTSUPDATEOPERATION_SETPAYLOAD_PAYLOADENTRY._serialized_options = b'8\001'
-  _POINTSUPDATEOPERATION_OVERWRITEPAYLOAD_PAYLOADENTRY._options = None
-  _POINTSUPDATEOPERATION_OVERWRITEPAYLOAD_PAYLOADENTRY._serialized_options = b'8\001'
-  _POINTSUPDATEOPERATION.fields_by_name['delete_deprecated']._options = None
-  _POINTSUPDATEOPERATION.fields_by_name['delete_deprecated']._serialized_options = b'\030\001'
-  _POINTSUPDATEOPERATION.fields_by_name['clear_payload_deprecated']._options = None
-  _POINTSUPDATEOPERATION.fields_by_name['clear_payload_deprecated']._serialized_options = b'\030\001'
-  _SCOREDPOINT_PAYLOADENTRY._options = None
-  _SCOREDPOINT_PAYLOADENTRY._serialized_options = b'8\001'
-  _RETRIEVEDPOINT_PAYLOADENTRY._options = None
-  _RETRIEVEDPOINT_PAYLOADENTRY._serialized_options = b'8\001'
-  _POINTSTRUCT_PAYLOADENTRY._options = None
-  _POINTSTRUCT_PAYLOADENTRY._serialized_options = b'8\001'
-  _INFERENCEUSAGE_MODELSENTRY._options = None
-  _INFERENCEUSAGE_MODELSENTRY._serialized_options = b'8\001'
-  _WRITEORDERINGTYPE._serialized_start=27430
-  _WRITEORDERINGTYPE._serialized_end=27483
-  _READCONSISTENCYTYPE._serialized_start=27485
-  _READCONSISTENCYTYPE._serialized_end=27541
-  _FIELDTYPE._serialized_start=27544
-  _FIELDTYPE._serialized_end=27717
-  _DIRECTION._serialized_start=27719
-  _DIRECTION._serialized_end=27749
-  _RECOMMENDSTRATEGY._serialized_start=27751
- _RECOMMENDSTRATEGY._serialized_end=27819 - _FUSION._serialized_start=27821 - _FUSION._serialized_end=27848 - _SAMPLE._serialized_start=27850 - _SAMPLE._serialized_end=27870 - _UPDATESTATUS._serialized_start=27872 - _UPDATESTATUS._serialized_end=27963 - _WRITEORDERING._serialized_start=97 - _WRITEORDERING._serialized_end=153 - _READCONSISTENCY._serialized_start=155 - _READCONSISTENCY._serialized_end=244 - _POINTID._serialized_start=246 - _POINTID._serialized_end=306 - _SPARSEINDICES._serialized_start=308 - _SPARSEINDICES._serialized_end=337 - _DOCUMENT._serialized_start=340 - _DOCUMENT._serialized_end=490 - _DOCUMENT_OPTIONSENTRY._serialized_start=429 - _DOCUMENT_OPTIONSENTRY._serialized_end=490 - _IMAGE._serialized_start=493 - _IMAGE._serialized_end=653 - _IMAGE_OPTIONSENTRY._serialized_start=429 - _IMAGE_OPTIONSENTRY._serialized_end=490 - _INFERENCEOBJECT._serialized_start=656 - _INFERENCEOBJECT._serialized_end=837 - _INFERENCEOBJECT_OPTIONSENTRY._serialized_start=429 - _INFERENCEOBJECT_OPTIONSENTRY._serialized_end=490 - _VECTOR._serialized_start=840 - _VECTOR._serialized_end=1215 - _VECTOROUTPUT._serialized_start=1218 - _VECTOROUTPUT._serialized_end=1486 - _DENSEVECTOR._serialized_start=1488 - _DENSEVECTOR._serialized_end=1515 - _SPARSEVECTOR._serialized_start=1517 - _SPARSEVECTOR._serialized_end=1564 - _MULTIDENSEVECTOR._serialized_start=1566 - _MULTIDENSEVECTOR._serialized_end=1622 - _VECTORINPUT._serialized_start=1625 - _VECTORINPUT._serialized_end=1920 - _SHARDKEYSELECTOR._serialized_start=1922 - _SHARDKEYSELECTOR._serialized_end=1978 - _UPSERTPOINTS._serialized_start=1981 - _UPSERTPOINTS._serialized_end=2226 - _DELETEPOINTS._serialized_start=2229 - _DELETEPOINTS._serialized_end=2477 - _GETPOINTS._serialized_start=2480 - _GETPOINTS._serialized_end=2869 - _UPDATEPOINTVECTORS._serialized_start=2872 - _UPDATEPOINTVECTORS._serialized_end=3124 - _POINTVECTORS._serialized_start=3126 - _POINTVECTORS._serialized_end=3203 - _DELETEPOINTVECTORS._serialized_start=3206 - _DELETEPOINTVECTORS._serialized_end=3511 - _SETPAYLOADPOINTS._serialized_start=3514 - _SETPAYLOADPOINTS._serialized_end=3951 - _SETPAYLOADPOINTS_PAYLOADENTRY._serialized_start=3811 - _SETPAYLOADPOINTS_PAYLOADENTRY._serialized_end=3872 - _DELETEPAYLOADPOINTS._serialized_start=3954 - _DELETEPAYLOADPOINTS._serialized_end=4263 - _CLEARPAYLOADPOINTS._serialized_start=4266 - _CLEARPAYLOADPOINTS._serialized_end=4520 - _CREATEFIELDINDEXCOLLECTION._serialized_start=4523 - _CREATEFIELDINDEXCOLLECTION._serialized_end=4826 - _DELETEFIELDINDEXCOLLECTION._serialized_start=4829 - _DELETEFIELDINDEXCOLLECTION._serialized_end=4989 - _PAYLOADINCLUDESELECTOR._serialized_start=4991 - _PAYLOADINCLUDESELECTOR._serialized_end=5031 - _PAYLOADEXCLUDESELECTOR._serialized_start=5033 - _PAYLOADEXCLUDESELECTOR._serialized_end=5073 - _WITHPAYLOADSELECTOR._serialized_start=5076 - _WITHPAYLOADSELECTOR._serialized_end=5237 - _NAMEDVECTORS._serialized_start=5240 - _NAMEDVECTORS._serialized_end=5370 - _NAMEDVECTORS_VECTORSENTRY._serialized_start=5308 - _NAMEDVECTORS_VECTORSENTRY._serialized_end=5370 - _NAMEDVECTORSOUTPUT._serialized_start=5373 - _NAMEDVECTORSOUTPUT._serialized_end=5521 - _NAMEDVECTORSOUTPUT_VECTORSENTRY._serialized_start=5453 - _NAMEDVECTORSOUTPUT_VECTORSENTRY._serialized_end=5521 - _VECTORS._serialized_start=5523 - _VECTORS._serialized_end=5626 - _VECTORSOUTPUT._serialized_start=5628 - _VECTORSOUTPUT._serialized_end=5749 - _VECTORSSELECTOR._serialized_start=5751 - _VECTORSSELECTOR._serialized_end=5783 - 
_WITHVECTORSSELECTOR._serialized_start=5785 - _WITHVECTORSSELECTOR._serialized_end=5888 - _QUANTIZATIONSEARCHPARAMS._serialized_start=5891 - _QUANTIZATIONSEARCHPARAMS._serialized_end=6027 - _SEARCHPARAMS._serialized_start=6030 - _SEARCHPARAMS._serialized_end=6230 - _SEARCHPOINTS._serialized_start=6233 - _SEARCHPOINTS._serialized_end=6891 - _SEARCHBATCHPOINTS._serialized_start=6894 - _SEARCHBATCHPOINTS._serialized_end=7094 - _WITHLOOKUP._serialized_start=7097 - _WITHLOOKUP._serialized_end=7275 - _SEARCHPOINTGROUPS._serialized_start=7278 - _SEARCHPOINTGROUPS._serialized_end=8003 - _STARTFROM._serialized_start=8005 - _STARTFROM._serialized_end=8130 - _ORDERBY._serialized_start=8133 - _ORDERBY._serialized_end=8271 - _SCROLLPOINTS._serialized_start=8274 - _SCROLLPOINTS._serialized_end=8800 - _LOOKUPLOCATION._serialized_start=8803 - _LOOKUPLOCATION._serialized_end=8968 - _RECOMMENDPOINTS._serialized_start=8971 - _RECOMMENDPOINTS._serialized_end=9816 - _RECOMMENDBATCHPOINTS._serialized_start=9819 - _RECOMMENDBATCHPOINTS._serialized_end=10028 - _RECOMMENDPOINTGROUPS._serialized_start=10031 - _RECOMMENDPOINTGROUPS._serialized_end=10943 - _TARGETVECTOR._serialized_start=10945 - _TARGETVECTOR._serialized_end=11010 - _VECTOREXAMPLE._serialized_start=11012 - _VECTOREXAMPLE._serialized_end=11103 - _CONTEXTEXAMPLEPAIR._serialized_start=11105 - _CONTEXTEXAMPLEPAIR._serialized_end=11207 - _DISCOVERPOINTS._serialized_start=11210 - _DISCOVERPOINTS._serialized_end=11864 - _DISCOVERBATCHPOINTS._serialized_start=11867 - _DISCOVERBATCHPOINTS._serialized_end=12073 - _COUNTPOINTS._serialized_start=12076 - _COUNTPOINTS._serialized_end=12369 - _RECOMMENDINPUT._serialized_start=12372 - _RECOMMENDINPUT._serialized_end=12529 - _CONTEXTINPUTPAIR._serialized_start=12531 - _CONTEXTINPUTPAIR._serialized_end=12627 - _DISCOVERINPUT._serialized_start=12629 - _DISCOVERINPUT._serialized_end=12720 - _CONTEXTINPUT._serialized_start=12722 - _CONTEXTINPUT._serialized_end=12777 - _FORMULA._serialized_start=12780 - _FORMULA._serialized_end=12942 - _FORMULA_DEFAULTSENTRY._serialized_start=12880 - _FORMULA_DEFAULTSENTRY._serialized_end=12942 - _EXPRESSION._serialized_start=12945 - _EXPRESSION._serialized_end=13661 - _GEODISTANCE._serialized_start=13663 - _GEODISTANCE._serialized_end=13722 - _MULTEXPRESSION._serialized_start=13724 - _MULTEXPRESSION._serialized_end=13774 - _SUMEXPRESSION._serialized_start=13776 - _SUMEXPRESSION._serialized_end=13824 - _DIVEXPRESSION._serialized_start=13827 - _DIVEXPRESSION._serialized_end=13961 - _POWEXPRESSION._serialized_start=13963 - _POWEXPRESSION._serialized_end=14050 - _DECAYPARAMSEXPRESSION._serialized_start=14053 - _DECAYPARAMSEXPRESSION._serialized_end=14225 - _NEARESTINPUTWITHMMR._serialized_start=14227 - _NEARESTINPUTWITHMMR._serialized_end=14312 - _MMR._serialized_start=14314 - _MMR._serialized_end=14409 - _QUERY._serialized_start=14412 - _QUERY._serialized_end=14797 - _PREFETCHQUERY._serialized_start=14800 - _PREFETCHQUERY._serialized_end=15179 - _QUERYPOINTS._serialized_start=15182 - _QUERYPOINTS._serialized_end=15955 - _QUERYBATCHPOINTS._serialized_start=15958 - _QUERYBATCHPOINTS._serialized_end=16155 - _QUERYPOINTGROUPS._serialized_start=16158 - _QUERYPOINTGROUPS._serialized_end=17002 - _FACETCOUNTS._serialized_start=17005 - _FACETCOUNTS._serialized_end=17357 - _FACETVALUE._serialized_start=17359 - _FACETVALUE._serialized_end=17453 - _FACETHIT._serialized_start=17455 - _FACETHIT._serialized_end=17515 - _SEARCHMATRIXPOINTS._serialized_start=17518 - _SEARCHMATRIXPOINTS._serialized_end=17896 
- _SEARCHMATRIXPAIRS._serialized_start=17898 - _SEARCHMATRIXPAIRS._serialized_end=17958 - _SEARCHMATRIXPAIR._serialized_start=17960 - _SEARCHMATRIXPAIR._serialized_end=18049 - _SEARCHMATRIXOFFSETS._serialized_start=18051 - _SEARCHMATRIXOFFSETS._serialized_end=18160 - _POINTSUPDATEOPERATION._serialized_start=18163 - _POINTSUPDATEOPERATION._serialized_end=20488 - _POINTSUPDATEOPERATION_POINTSTRUCTLIST._serialized_start=18868 - _POINTSUPDATEOPERATION_POINTSTRUCTLIST._serialized_end=19004 - _POINTSUPDATEOPERATION_SETPAYLOAD._serialized_start=19007 - _POINTSUPDATEOPERATION_SETPAYLOAD._serialized_end=19336 - _POINTSUPDATEOPERATION_SETPAYLOAD_PAYLOADENTRY._serialized_start=3811 - _POINTSUPDATEOPERATION_SETPAYLOAD_PAYLOADENTRY._serialized_end=3872 - _POINTSUPDATEOPERATION_OVERWRITEPAYLOAD._serialized_start=19339 - _POINTSUPDATEOPERATION_OVERWRITEPAYLOAD._serialized_end=19680 - _POINTSUPDATEOPERATION_OVERWRITEPAYLOAD_PAYLOADENTRY._serialized_start=3811 - _POINTSUPDATEOPERATION_OVERWRITEPAYLOAD_PAYLOADENTRY._serialized_end=3872 - _POINTSUPDATEOPERATION_DELETEPAYLOAD._serialized_start=19683 - _POINTSUPDATEOPERATION_DELETEPAYLOAD._serialized_end=19868 - _POINTSUPDATEOPERATION_UPDATEVECTORS._serialized_start=19871 - _POINTSUPDATEOPERATION_UPDATEVECTORS._serialized_end=20006 - _POINTSUPDATEOPERATION_DELETEVECTORS._serialized_start=20009 - _POINTSUPDATEOPERATION_DELETEVECTORS._serialized_end=20197 - _POINTSUPDATEOPERATION_DELETEPOINTS._serialized_start=20200 - _POINTSUPDATEOPERATION_DELETEPOINTS._serialized_end=20336 - _POINTSUPDATEOPERATION_CLEARPAYLOAD._serialized_start=20339 - _POINTSUPDATEOPERATION_CLEARPAYLOAD._serialized_end=20475 - _UPDATEBATCHPOINTS._serialized_start=20491 - _UPDATEBATCHPOINTS._serialized_end=20673 - _POINTSOPERATIONRESPONSE._serialized_start=20675 - _POINTSOPERATIONRESPONSE._serialized_end=20797 - _UPDATERESULT._serialized_start=20799 - _UPDATERESULT._serialized_end=20895 - _ORDERVALUE._serialized_start=20897 - _ORDERVALUE._serialized_end=20952 - _SCOREDPOINT._serialized_start=20955 - _SCOREDPOINT._serialized_end=21324 - _SCOREDPOINT_PAYLOADENTRY._serialized_start=3811 - _SCOREDPOINT_PAYLOADENTRY._serialized_end=3872 - _GROUPID._serialized_start=21326 - _GROUPID._serialized_end=21418 - _POINTGROUP._serialized_start=21420 - _POINTGROUP._serialized_end=21536 - _GROUPSRESULT._serialized_start=21538 - _GROUPSRESULT._serialized_end=21588 - _SEARCHRESPONSE._serialized_start=21590 - _SEARCHRESPONSE._serialized_end=21702 - _QUERYRESPONSE._serialized_start=21704 - _QUERYRESPONSE._serialized_end=21815 - _QUERYBATCHRESPONSE._serialized_start=21817 - _QUERYBATCHRESPONSE._serialized_end=21933 - _QUERYGROUPSRESPONSE._serialized_start=21935 - _QUERYGROUPSRESPONSE._serialized_end=22053 - _BATCHRESULT._serialized_start=22055 - _BATCHRESULT._serialized_end=22105 - _SEARCHBATCHRESPONSE._serialized_start=22107 - _SEARCHBATCHRESPONSE._serialized_end=22224 - _SEARCHGROUPSRESPONSE._serialized_start=22226 - _SEARCHGROUPSRESPONSE._serialized_end=22345 - _COUNTRESPONSE._serialized_start=22347 - _COUNTRESPONSE._serialized_end=22458 - _SCROLLRESPONSE._serialized_start=22461 - _SCROLLRESPONSE._serialized_end=22645 - _COUNTRESULT._serialized_start=22647 - _COUNTRESULT._serialized_end=22675 - _RETRIEVEDPOINT._serialized_start=22678 - _RETRIEVEDPOINT._serialized_end=23021 - _RETRIEVEDPOINT_PAYLOADENTRY._serialized_start=3811 - _RETRIEVEDPOINT_PAYLOADENTRY._serialized_end=3872 - _GETRESPONSE._serialized_start=23023 - _GETRESPONSE._serialized_end=23135 - _RECOMMENDRESPONSE._serialized_start=23137 - 
-  _RECOMMENDRESPONSE._serialized_end=23252
-  _RECOMMENDBATCHRESPONSE._serialized_start=23254
-  _RECOMMENDBATCHRESPONSE._serialized_end=23374
-  _DISCOVERRESPONSE._serialized_start=23376
-  _DISCOVERRESPONSE._serialized_end=23490
-  _DISCOVERBATCHRESPONSE._serialized_start=23492
-  _DISCOVERBATCHRESPONSE._serialized_end=23611
-  _RECOMMENDGROUPSRESPONSE._serialized_start=23613
-  _RECOMMENDGROUPSRESPONSE._serialized_end=23735
-  _UPDATEBATCHRESPONSE._serialized_start=23737
-  _UPDATEBATCHRESPONSE._serialized_end=23855
-  _FACETRESPONSE._serialized_start=23857
-  _FACETRESPONSE._serialized_end=23918
-  _SEARCHMATRIXPAIRSRESPONSE._serialized_start=23921
-  _SEARCHMATRIXPAIRSRESPONSE._serialized_end=24050
-  _SEARCHMATRIXOFFSETSRESPONSE._serialized_start=24053
-  _SEARCHMATRIXOFFSETSRESPONSE._serialized_end=24186
-  _FILTER._serialized_start=24189
-  _FILTER._serialized_end=24361
-  _MINSHOULD._serialized_start=24363
-  _MINSHOULD._serialized_end=24432
-  _CONDITION._serialized_start=24435
-  _CONDITION._serialized_end=24766
-  _ISEMPTYCONDITION._serialized_start=24768
-  _ISEMPTYCONDITION._serialized_end=24799
-  _ISNULLCONDITION._serialized_start=24801
-  _ISNULLCONDITION._serialized_end=24831
-  _HASIDCONDITION._serialized_start=24833
-  _HASIDCONDITION._serialized_end=24882
-  _HASVECTORCONDITION._serialized_start=24884
-  _HASVECTORCONDITION._serialized_end=24924
-  _NESTEDCONDITION._serialized_start=24926
-  _NESTEDCONDITION._serialized_end=24988
-  _FIELDCONDITION._serialized_start=24991
-  _FIELDCONDITION._serialized_end=25370
-  _MATCH._serialized_start=25373
-  _MATCH._serialized_end=25682
-  _REPEATEDSTRINGS._serialized_start=25684
-  _REPEATEDSTRINGS._serialized_end=25718
-  _REPEATEDINTEGERS._serialized_start=25720
-  _REPEATEDINTEGERS._serialized_end=25756
-  _RANGE._serialized_start=25758
-  _RANGE._serialized_end=25865
-  _DATETIMERANGE._serialized_start=25868
-  _DATETIMERANGE._serialized_end=26095
-  _GEOBOUNDINGBOX._serialized_start=26097
-  _GEOBOUNDINGBOX._serialized_end=26189
-  _GEORADIUS._serialized_start=26191
-  _GEORADIUS._serialized_end=26252
-  _GEOLINESTRING._serialized_start=26254
-  _GEOLINESTRING._serialized_end=26303
-  _GEOPOLYGON._serialized_start=26305
-  _GEOPOLYGON._serialized_end=26400
-  _VALUESCOUNT._serialized_start=26402
-  _VALUESCOUNT._serialized_end=26515
-  _POINTSSELECTOR._serialized_start=26517
-  _POINTSSELECTOR._serialized_end=26634
-  _POINTSIDSLIST._serialized_start=26636
-  _POINTSIDSLIST._serialized_end=26681
-  _POINTSTRUCT._serialized_start=26684
-  _POINTSTRUCT._serialized_end=26897
-  _POINTSTRUCT_PAYLOADENTRY._serialized_start=3811
-  _POINTSTRUCT_PAYLOADENTRY._serialized_end=3872
-  _GEOPOINT._serialized_start=26899
-  _GEOPOINT._serialized_end=26935
-  _USAGE._serialized_start=26938
-  _USAGE._serialized_end=27066
-  _INFERENCEUSAGE._serialized_start=27069
-  _INFERENCEUSAGE._serialized_end=27204
-  _INFERENCEUSAGE_MODELSENTRY._serialized_start=27139
-  _INFERENCEUSAGE_MODELSENTRY._serialized_end=27204
-  _MODELUSAGE._serialized_start=27206
-  _MODELUSAGE._serialized_end=27234
-  _HARDWAREUSAGE._serialized_start=27237
-  _HARDWAREUSAGE._serialized_end=27428
+  _globals['DESCRIPTOR']._options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\252\002\022Qdrant.Client.Grpc'
+  _globals['_DOCUMENT_OPTIONSENTRY']._options = None
+  _globals['_DOCUMENT_OPTIONSENTRY']._serialized_options = b'8\001'
+  _globals['_IMAGE_OPTIONSENTRY']._options = None
+  _globals['_IMAGE_OPTIONSENTRY']._serialized_options = b'8\001'
+  _globals['_INFERENCEOBJECT_OPTIONSENTRY']._options = None
+  _globals['_INFERENCEOBJECT_OPTIONSENTRY']._serialized_options = b'8\001'
+  _globals['_VECTOR'].fields_by_name['data']._options = None
+  _globals['_VECTOR'].fields_by_name['data']._serialized_options = b'\030\001'
+  _globals['_VECTOR'].fields_by_name['indices']._options = None
+  _globals['_VECTOR'].fields_by_name['indices']._serialized_options = b'\030\001'
+  _globals['_VECTOR'].fields_by_name['vectors_count']._options = None
+  _globals['_VECTOR'].fields_by_name['vectors_count']._serialized_options = b'\030\001'
+  _globals['_VECTOROUTPUT'].fields_by_name['data']._options = None
+  _globals['_VECTOROUTPUT'].fields_by_name['data']._serialized_options = b'\030\001'
+  _globals['_VECTOROUTPUT'].fields_by_name['indices']._options = None
+  _globals['_VECTOROUTPUT'].fields_by_name['indices']._serialized_options = b'\030\001'
+  _globals['_VECTOROUTPUT'].fields_by_name['vectors_count']._options = None
+  _globals['_VECTOROUTPUT'].fields_by_name['vectors_count']._serialized_options = b'\030\001'
+  _globals['_SETPAYLOADPOINTS_PAYLOADENTRY']._options = None
+  _globals['_SETPAYLOADPOINTS_PAYLOADENTRY']._serialized_options = b'8\001'
+  _globals['_NAMEDVECTORS_VECTORSENTRY']._options = None
+  _globals['_NAMEDVECTORS_VECTORSENTRY']._serialized_options = b'8\001'
+  _globals['_NAMEDVECTORSOUTPUT_VECTORSENTRY']._options = None
+  _globals['_NAMEDVECTORSOUTPUT_VECTORSENTRY']._serialized_options = b'8\001'
+  _globals['_FORMULA_DEFAULTSENTRY']._options = None
+  _globals['_FORMULA_DEFAULTSENTRY']._serialized_options = b'8\001'
+  _globals['_POINTSUPDATEOPERATION_SETPAYLOAD_PAYLOADENTRY']._options = None
+  _globals['_POINTSUPDATEOPERATION_SETPAYLOAD_PAYLOADENTRY']._serialized_options = b'8\001'
+  _globals['_POINTSUPDATEOPERATION_OVERWRITEPAYLOAD_PAYLOADENTRY']._options = None
+  _globals['_POINTSUPDATEOPERATION_OVERWRITEPAYLOAD_PAYLOADENTRY']._serialized_options = b'8\001'
+  _globals['_POINTSUPDATEOPERATION'].fields_by_name['delete_deprecated']._options = None
+  _globals['_POINTSUPDATEOPERATION'].fields_by_name['delete_deprecated']._serialized_options = b'\030\001'
+  _globals['_POINTSUPDATEOPERATION'].fields_by_name['clear_payload_deprecated']._options = None
+  _globals['_POINTSUPDATEOPERATION'].fields_by_name['clear_payload_deprecated']._serialized_options = b'\030\001'
+  _globals['_SCOREDPOINT_PAYLOADENTRY']._options = None
+  _globals['_SCOREDPOINT_PAYLOADENTRY']._serialized_options = b'8\001'
+  _globals['_RETRIEVEDPOINT_PAYLOADENTRY']._options = None
+  _globals['_RETRIEVEDPOINT_PAYLOADENTRY']._serialized_options = b'8\001'
+  _globals['_POINTSTRUCT_PAYLOADENTRY']._options = None
+  _globals['_POINTSTRUCT_PAYLOADENTRY']._serialized_options = b'8\001'
+  _globals['_INFERENCEUSAGE_MODELSENTRY']._options = None
+  _globals['_INFERENCEUSAGE_MODELSENTRY']._serialized_options = b'8\001'
+  _globals['_WRITEORDERINGTYPE']._serialized_start=28038
+  _globals['_WRITEORDERINGTYPE']._serialized_end=28091
+  _globals['_READCONSISTENCYTYPE']._serialized_start=28093
+  _globals['_READCONSISTENCYTYPE']._serialized_end=28149
+  _globals['_FIELDTYPE']._serialized_start=28152
+  _globals['_FIELDTYPE']._serialized_end=28325
+  _globals['_DIRECTION']._serialized_start=28327
+  _globals['_DIRECTION']._serialized_end=28357
+  _globals['_RECOMMENDSTRATEGY']._serialized_start=28359
+  _globals['_RECOMMENDSTRATEGY']._serialized_end=28427
+  _globals['_FUSION']._serialized_start=28429
+  _globals['_FUSION']._serialized_end=28456
+  _globals['_SAMPLE']._serialized_start=28458
+  _globals['_SAMPLE']._serialized_end=28478
+  _globals['_UPDATESTATUS']._serialized_start=28480
+  _globals['_UPDATESTATUS']._serialized_end=28571
+  _globals['_WRITEORDERING']._serialized_start=97
+  _globals['_WRITEORDERING']._serialized_end=153
+  _globals['_READCONSISTENCY']._serialized_start=155
+  _globals['_READCONSISTENCY']._serialized_end=244
+  _globals['_POINTID']._serialized_start=246
+  _globals['_POINTID']._serialized_end=306
+  _globals['_SPARSEINDICES']._serialized_start=308
+  _globals['_SPARSEINDICES']._serialized_end=337
+  _globals['_DOCUMENT']._serialized_start=340
+  _globals['_DOCUMENT']._serialized_end=490
+  _globals['_DOCUMENT_OPTIONSENTRY']._serialized_start=429
+  _globals['_DOCUMENT_OPTIONSENTRY']._serialized_end=490
+  _globals['_IMAGE']._serialized_start=493
+  _globals['_IMAGE']._serialized_end=653
+  _globals['_IMAGE_OPTIONSENTRY']._serialized_start=429
+  _globals['_IMAGE_OPTIONSENTRY']._serialized_end=490
+  _globals['_INFERENCEOBJECT']._serialized_start=656
+  _globals['_INFERENCEOBJECT']._serialized_end=837
+  _globals['_INFERENCEOBJECT_OPTIONSENTRY']._serialized_start=429
+  _globals['_INFERENCEOBJECT_OPTIONSENTRY']._serialized_end=490
+  _globals['_VECTOR']._serialized_start=840
+  _globals['_VECTOR']._serialized_end=1227
+  _globals['_VECTOROUTPUT']._serialized_start=1230
+  _globals['_VECTOROUTPUT']._serialized_end=1510
+  _globals['_DENSEVECTOR']._serialized_start=1512
+  _globals['_DENSEVECTOR']._serialized_end=1539
+  _globals['_SPARSEVECTOR']._serialized_start=1541
+  _globals['_SPARSEVECTOR']._serialized_end=1588
+  _globals['_MULTIDENSEVECTOR']._serialized_start=1590
+  _globals['_MULTIDENSEVECTOR']._serialized_end=1646
+  _globals['_VECTORINPUT']._serialized_start=1649
+  _globals['_VECTORINPUT']._serialized_end=1944
+  _globals['_SHARDKEYSELECTOR']._serialized_start=1946
+  _globals['_SHARDKEYSELECTOR']._serialized_end=2056
+  _globals['_UPSERTPOINTS']._serialized_start=2059
+  _globals['_UPSERTPOINTS']._serialized_end=2366
+  _globals['_DELETEPOINTS']._serialized_start=2369
+  _globals['_DELETEPOINTS']._serialized_end=2617
+  _globals['_GETPOINTS']._serialized_start=2620
+  _globals['_GETPOINTS']._serialized_end=3009
+  _globals['_UPDATEPOINTVECTORS']._serialized_start=3012
+  _globals['_UPDATEPOINTVECTORS']._serialized_end=3326
+  _globals['_POINTVECTORS']._serialized_start=3328
+  _globals['_POINTVECTORS']._serialized_end=3405
+  _globals['_DELETEPOINTVECTORS']._serialized_start=3408
+  _globals['_DELETEPOINTVECTORS']._serialized_end=3713
+  _globals['_SETPAYLOADPOINTS']._serialized_start=3716
+  _globals['_SETPAYLOADPOINTS']._serialized_end=4153
+  _globals['_SETPAYLOADPOINTS_PAYLOADENTRY']._serialized_start=4013
+  _globals['_SETPAYLOADPOINTS_PAYLOADENTRY']._serialized_end=4074
+  _globals['_DELETEPAYLOADPOINTS']._serialized_start=4156
+  _globals['_DELETEPAYLOADPOINTS']._serialized_end=4465
+  _globals['_CLEARPAYLOADPOINTS']._serialized_start=4468
+  _globals['_CLEARPAYLOADPOINTS']._serialized_end=4722
+  _globals['_CREATEFIELDINDEXCOLLECTION']._serialized_start=4725
+  _globals['_CREATEFIELDINDEXCOLLECTION']._serialized_end=5028
+  _globals['_DELETEFIELDINDEXCOLLECTION']._serialized_start=5031
+  _globals['_DELETEFIELDINDEXCOLLECTION']._serialized_end=5191
+  _globals['_PAYLOADINCLUDESELECTOR']._serialized_start=5193
+  _globals['_PAYLOADINCLUDESELECTOR']._serialized_end=5233
+  _globals['_PAYLOADEXCLUDESELECTOR']._serialized_start=5235
+  _globals['_PAYLOADEXCLUDESELECTOR']._serialized_end=5275
+  _globals['_WITHPAYLOADSELECTOR']._serialized_start=5278
+  _globals['_WITHPAYLOADSELECTOR']._serialized_end=5439
+  _globals['_NAMEDVECTORS']._serialized_start=5442
+  _globals['_NAMEDVECTORS']._serialized_end=5572
+  _globals['_NAMEDVECTORS_VECTORSENTRY']._serialized_start=5510
+  _globals['_NAMEDVECTORS_VECTORSENTRY']._serialized_end=5572
+  _globals['_NAMEDVECTORSOUTPUT']._serialized_start=5575
+  _globals['_NAMEDVECTORSOUTPUT']._serialized_end=5723
+  _globals['_NAMEDVECTORSOUTPUT_VECTORSENTRY']._serialized_start=5655
+  _globals['_NAMEDVECTORSOUTPUT_VECTORSENTRY']._serialized_end=5723
+  _globals['_VECTORS']._serialized_start=5725
+  _globals['_VECTORS']._serialized_end=5828
+  _globals['_VECTORSOUTPUT']._serialized_start=5830
+  _globals['_VECTORSOUTPUT']._serialized_end=5951
+  _globals['_VECTORSSELECTOR']._serialized_start=5953
+  _globals['_VECTORSSELECTOR']._serialized_end=5985
+  _globals['_WITHVECTORSSELECTOR']._serialized_start=5987
+  _globals['_WITHVECTORSSELECTOR']._serialized_end=6090
+  _globals['_QUANTIZATIONSEARCHPARAMS']._serialized_start=6093
+  _globals['_QUANTIZATIONSEARCHPARAMS']._serialized_end=6229
+  _globals['_ACORNSEARCHPARAMS']._serialized_start=6231
+  _globals['_ACORNSEARCHPARAMS']._serialized_end=6332
+  _globals['_SEARCHPARAMS']._serialized_start=6335
+  _globals['_SEARCHPARAMS']._serialized_end=6592
+  _globals['_SEARCHPOINTS']._serialized_start=6595
+  _globals['_SEARCHPOINTS']._serialized_end=7253
+  _globals['_SEARCHBATCHPOINTS']._serialized_start=7256
+  _globals['_SEARCHBATCHPOINTS']._serialized_end=7456
+  _globals['_WITHLOOKUP']._serialized_start=7459
+  _globals['_WITHLOOKUP']._serialized_end=7637
+  _globals['_SEARCHPOINTGROUPS']._serialized_start=7640
+  _globals['_SEARCHPOINTGROUPS']._serialized_end=8365
+  _globals['_STARTFROM']._serialized_start=8367
+  _globals['_STARTFROM']._serialized_end=8492
+  _globals['_ORDERBY']._serialized_start=8495
+  _globals['_ORDERBY']._serialized_end=8633
+  _globals['_SCROLLPOINTS']._serialized_start=8636
+  _globals['_SCROLLPOINTS']._serialized_end=9162
+  _globals['_LOOKUPLOCATION']._serialized_start=9165
+  _globals['_LOOKUPLOCATION']._serialized_end=9330
+  _globals['_RECOMMENDPOINTS']._serialized_start=9333
+  _globals['_RECOMMENDPOINTS']._serialized_end=10178
+  _globals['_RECOMMENDBATCHPOINTS']._serialized_start=10181
+  _globals['_RECOMMENDBATCHPOINTS']._serialized_end=10390
+  _globals['_RECOMMENDPOINTGROUPS']._serialized_start=10393
+  _globals['_RECOMMENDPOINTGROUPS']._serialized_end=11305
+  _globals['_TARGETVECTOR']._serialized_start=11307
+  _globals['_TARGETVECTOR']._serialized_end=11372
+  _globals['_VECTOREXAMPLE']._serialized_start=11374
+  _globals['_VECTOREXAMPLE']._serialized_end=11465
+  _globals['_CONTEXTEXAMPLEPAIR']._serialized_start=11467
+  _globals['_CONTEXTEXAMPLEPAIR']._serialized_end=11569
+  _globals['_DISCOVERPOINTS']._serialized_start=11572
+  _globals['_DISCOVERPOINTS']._serialized_end=12226
+  _globals['_DISCOVERBATCHPOINTS']._serialized_start=12229
+  _globals['_DISCOVERBATCHPOINTS']._serialized_end=12435
+  _globals['_COUNTPOINTS']._serialized_start=12438
+  _globals['_COUNTPOINTS']._serialized_end=12731
+  _globals['_RECOMMENDINPUT']._serialized_start=12734
+  _globals['_RECOMMENDINPUT']._serialized_end=12891
+  _globals['_CONTEXTINPUTPAIR']._serialized_start=12893
+  _globals['_CONTEXTINPUTPAIR']._serialized_end=12989
+  _globals['_DISCOVERINPUT']._serialized_start=12991
+  _globals['_DISCOVERINPUT']._serialized_end=13082
+  _globals['_CONTEXTINPUT']._serialized_start=13084
+  _globals['_CONTEXTINPUT']._serialized_end=13139
+  _globals['_FORMULA']._serialized_start=13142
+  _globals['_FORMULA']._serialized_end=13304
+  _globals['_FORMULA_DEFAULTSENTRY']._serialized_start=13242
+  _globals['_FORMULA_DEFAULTSENTRY']._serialized_end=13304
+  _globals['_EXPRESSION']._serialized_start=13307
+  _globals['_EXPRESSION']._serialized_end=14023
+  _globals['_GEODISTANCE']._serialized_start=14025
+  _globals['_GEODISTANCE']._serialized_end=14084
+  _globals['_MULTEXPRESSION']._serialized_start=14086
+  _globals['_MULTEXPRESSION']._serialized_end=14136
+  _globals['_SUMEXPRESSION']._serialized_start=14138
+  _globals['_SUMEXPRESSION']._serialized_end=14186
+  _globals['_DIVEXPRESSION']._serialized_start=14189
+  _globals['_DIVEXPRESSION']._serialized_end=14323
+  _globals['_POWEXPRESSION']._serialized_start=14325
+  _globals['_POWEXPRESSION']._serialized_end=14412
+  _globals['_DECAYPARAMSEXPRESSION']._serialized_start=14415
+  _globals['_DECAYPARAMSEXPRESSION']._serialized_end=14587
+  _globals['_NEARESTINPUTWITHMMR']._serialized_start=14589
+  _globals['_NEARESTINPUTWITHMMR']._serialized_end=14674
+  _globals['_MMR']._serialized_start=14676
+  _globals['_MMR']._serialized_end=14771
+  _globals['_RRF']._serialized_start=14773
+  _globals['_RRF']._serialized_end=14800
+  _globals['_QUERY']._serialized_start=14803
+  _globals['_QUERY']._serialized_end=15216
+  _globals['_PREFETCHQUERY']._serialized_start=15219
+  _globals['_PREFETCHQUERY']._serialized_end=15598
+  _globals['_QUERYPOINTS']._serialized_start=15601
+  _globals['_QUERYPOINTS']._serialized_end=16374
+  _globals['_QUERYBATCHPOINTS']._serialized_start=16377
+  _globals['_QUERYBATCHPOINTS']._serialized_end=16574
+  _globals['_QUERYPOINTGROUPS']._serialized_start=16577
+  _globals['_QUERYPOINTGROUPS']._serialized_end=17421
+  _globals['_FACETCOUNTS']._serialized_start=17424
+  _globals['_FACETCOUNTS']._serialized_end=17776
+  _globals['_FACETVALUE']._serialized_start=17778
+  _globals['_FACETVALUE']._serialized_end=17872
+  _globals['_FACETHIT']._serialized_start=17874
+  _globals['_FACETHIT']._serialized_end=17934
+  _globals['_SEARCHMATRIXPOINTS']._serialized_start=17937
+  _globals['_SEARCHMATRIXPOINTS']._serialized_end=18315
+  _globals['_SEARCHMATRIXPAIRS']._serialized_start=18317
+  _globals['_SEARCHMATRIXPAIRS']._serialized_end=18377
+  _globals['_SEARCHMATRIXPAIR']._serialized_start=18379
+  _globals['_SEARCHMATRIXPAIR']._serialized_end=18468
+  _globals['_SEARCHMATRIXOFFSETS']._serialized_start=18470
+  _globals['_SEARCHMATRIXOFFSETS']._serialized_end=18579
+  _globals['_POINTSUPDATEOPERATION']._serialized_start=18582
+  _globals['_POINTSUPDATEOPERATION']._serialized_end=21031
+  _globals['_POINTSUPDATEOPERATION_POINTSTRUCTLIST']._serialized_start=19287
+  _globals['_POINTSUPDATEOPERATION_POINTSTRUCTLIST']._serialized_end=19485
+  _globals['_POINTSUPDATEOPERATION_SETPAYLOAD']._serialized_start=19488
+  _globals['_POINTSUPDATEOPERATION_SETPAYLOAD']._serialized_end=19817
+  _globals['_POINTSUPDATEOPERATION_SETPAYLOAD_PAYLOADENTRY']._serialized_start=4013
+  _globals['_POINTSUPDATEOPERATION_SETPAYLOAD_PAYLOADENTRY']._serialized_end=4074
+  _globals['_POINTSUPDATEOPERATION_OVERWRITEPAYLOAD']._serialized_start=19820
+  _globals['_POINTSUPDATEOPERATION_OVERWRITEPAYLOAD']._serialized_end=20161
+  _globals['_POINTSUPDATEOPERATION_OVERWRITEPAYLOAD_PAYLOADENTRY']._serialized_start=4013
+  _globals['_POINTSUPDATEOPERATION_OVERWRITEPAYLOAD_PAYLOADENTRY']._serialized_end=4074
+  _globals['_POINTSUPDATEOPERATION_DELETEPAYLOAD']._serialized_start=20164
+  _globals['_POINTSUPDATEOPERATION_DELETEPAYLOAD']._serialized_end=20349
+  _globals['_POINTSUPDATEOPERATION_UPDATEVECTORS']._serialized_start=20352
+  _globals['_POINTSUPDATEOPERATION_UPDATEVECTORS']._serialized_end=20549
+  _globals['_POINTSUPDATEOPERATION_DELETEVECTORS']._serialized_start=20552
+  _globals['_POINTSUPDATEOPERATION_DELETEVECTORS']._serialized_end=20740
+  _globals['_POINTSUPDATEOPERATION_DELETEPOINTS']._serialized_start=20743
+  _globals['_POINTSUPDATEOPERATION_DELETEPOINTS']._serialized_end=20879
+  _globals['_POINTSUPDATEOPERATION_CLEARPAYLOAD']._serialized_start=20882
+  _globals['_POINTSUPDATEOPERATION_CLEARPAYLOAD']._serialized_end=21018
+  _globals['_UPDATEBATCHPOINTS']._serialized_start=21034
+  _globals['_UPDATEBATCHPOINTS']._serialized_end=21216
+  _globals['_POINTSOPERATIONRESPONSE']._serialized_start=21218
+  _globals['_POINTSOPERATIONRESPONSE']._serialized_end=21340
+  _globals['_UPDATERESULT']._serialized_start=21342
+  _globals['_UPDATERESULT']._serialized_end=21438
+  _globals['_ORDERVALUE']._serialized_start=21440
+  _globals['_ORDERVALUE']._serialized_end=21495
+  _globals['_SCOREDPOINT']._serialized_start=21498
+  _globals['_SCOREDPOINT']._serialized_end=21867
+  _globals['_SCOREDPOINT_PAYLOADENTRY']._serialized_start=4013
+  _globals['_SCOREDPOINT_PAYLOADENTRY']._serialized_end=4074
+  _globals['_GROUPID']._serialized_start=21869
+  _globals['_GROUPID']._serialized_end=21961
+  _globals['_POINTGROUP']._serialized_start=21963
+  _globals['_POINTGROUP']._serialized_end=22079
+  _globals['_GROUPSRESULT']._serialized_start=22081
+  _globals['_GROUPSRESULT']._serialized_end=22131
+  _globals['_SEARCHRESPONSE']._serialized_start=22133
+  _globals['_SEARCHRESPONSE']._serialized_end=22245
+  _globals['_QUERYRESPONSE']._serialized_start=22247
+  _globals['_QUERYRESPONSE']._serialized_end=22358
+  _globals['_QUERYBATCHRESPONSE']._serialized_start=22360
+  _globals['_QUERYBATCHRESPONSE']._serialized_end=22476
+  _globals['_QUERYGROUPSRESPONSE']._serialized_start=22478
+  _globals['_QUERYGROUPSRESPONSE']._serialized_end=22596
+  _globals['_BATCHRESULT']._serialized_start=22598
+  _globals['_BATCHRESULT']._serialized_end=22648
+  _globals['_SEARCHBATCHRESPONSE']._serialized_start=22650
+  _globals['_SEARCHBATCHRESPONSE']._serialized_end=22767
+  _globals['_SEARCHGROUPSRESPONSE']._serialized_start=22769
+  _globals['_SEARCHGROUPSRESPONSE']._serialized_end=22888
+  _globals['_COUNTRESPONSE']._serialized_start=22890
+  _globals['_COUNTRESPONSE']._serialized_end=23001
+  _globals['_SCROLLRESPONSE']._serialized_start=23004
+  _globals['_SCROLLRESPONSE']._serialized_end=23188
+  _globals['_COUNTRESULT']._serialized_start=23190
+  _globals['_COUNTRESULT']._serialized_end=23218
+  _globals['_RETRIEVEDPOINT']._serialized_start=23221
+  _globals['_RETRIEVEDPOINT']._serialized_end=23564
+  _globals['_RETRIEVEDPOINT_PAYLOADENTRY']._serialized_start=4013
+  _globals['_RETRIEVEDPOINT_PAYLOADENTRY']._serialized_end=4074
+  _globals['_GETRESPONSE']._serialized_start=23566
+  _globals['_GETRESPONSE']._serialized_end=23678
+  _globals['_RECOMMENDRESPONSE']._serialized_start=23680
+  _globals['_RECOMMENDRESPONSE']._serialized_end=23795
+  _globals['_RECOMMENDBATCHRESPONSE']._serialized_start=23797
+  _globals['_RECOMMENDBATCHRESPONSE']._serialized_end=23917
+  _globals['_DISCOVERRESPONSE']._serialized_start=23919
+  _globals['_DISCOVERRESPONSE']._serialized_end=24033
+  _globals['_DISCOVERBATCHRESPONSE']._serialized_start=24035
+  _globals['_DISCOVERBATCHRESPONSE']._serialized_end=24154
+  _globals['_RECOMMENDGROUPSRESPONSE']._serialized_start=24156
+  _globals['_RECOMMENDGROUPSRESPONSE']._serialized_end=24278
+  _globals['_UPDATEBATCHRESPONSE']._serialized_start=24280
+  _globals['_UPDATEBATCHRESPONSE']._serialized_end=24398
+  _globals['_FACETRESPONSE']._serialized_start=24400
+  _globals['_FACETRESPONSE']._serialized_end=24506
+  _globals['_SEARCHMATRIXPAIRSRESPONSE']._serialized_start=24509
+  _globals['_SEARCHMATRIXPAIRSRESPONSE']._serialized_end=24638
+  _globals['_SEARCHMATRIXOFFSETSRESPONSE']._serialized_start=24641
+  _globals['_SEARCHMATRIXOFFSETSRESPONSE']._serialized_end=24774
+  _globals['_FILTER']._serialized_start=24777
+  _globals['_FILTER']._serialized_end=24949
+  _globals['_MINSHOULD']._serialized_start=24951
+  _globals['_MINSHOULD']._serialized_end=25020
+  _globals['_CONDITION']._serialized_start=25023
+  _globals['_CONDITION']._serialized_end=25354
+  _globals['_ISEMPTYCONDITION']._serialized_start=25356
+  _globals['_ISEMPTYCONDITION']._serialized_end=25387
+  _globals['_ISNULLCONDITION']._serialized_start=25389
+  _globals['_ISNULLCONDITION']._serialized_end=25419
+  _globals['_HASIDCONDITION']._serialized_start=25421
+  _globals['_HASIDCONDITION']._serialized_end=25470
+  _globals['_HASVECTORCONDITION']._serialized_start=25472
+  _globals['_HASVECTORCONDITION']._serialized_end=25512
+  _globals['_NESTEDCONDITION']._serialized_start=25514
+  _globals['_NESTEDCONDITION']._serialized_end=25576
+  _globals['_FIELDCONDITION']._serialized_start=25579
+  _globals['_FIELDCONDITION']._serialized_end=25958
+  _globals['_MATCH']._serialized_start=25961
+  _globals['_MATCH']._serialized_end=26290
+  _globals['_REPEATEDSTRINGS']._serialized_start=26292
+  _globals['_REPEATEDSTRINGS']._serialized_end=26326
+  _globals['_REPEATEDINTEGERS']._serialized_start=26328
+  _globals['_REPEATEDINTEGERS']._serialized_end=26364
+  _globals['_RANGE']._serialized_start=26366
+  _globals['_RANGE']._serialized_end=26473
+  _globals['_DATETIMERANGE']._serialized_start=26476
+  _globals['_DATETIMERANGE']._serialized_end=26703
+  _globals['_GEOBOUNDINGBOX']._serialized_start=26705
+  _globals['_GEOBOUNDINGBOX']._serialized_end=26797
+  _globals['_GEORADIUS']._serialized_start=26799
+  _globals['_GEORADIUS']._serialized_end=26860
+  _globals['_GEOLINESTRING']._serialized_start=26862
+  _globals['_GEOLINESTRING']._serialized_end=26911
+  _globals['_GEOPOLYGON']._serialized_start=26913
+  _globals['_GEOPOLYGON']._serialized_end=27008
+  _globals['_VALUESCOUNT']._serialized_start=27010
+  _globals['_VALUESCOUNT']._serialized_end=27123
+  _globals['_POINTSSELECTOR']._serialized_start=27125
+  _globals['_POINTSSELECTOR']._serialized_end=27242
+  _globals['_POINTSIDSLIST']._serialized_start=27244
+  _globals['_POINTSIDSLIST']._serialized_end=27289
+  _globals['_POINTSTRUCT']._serialized_start=27292
+  _globals['_POINTSTRUCT']._serialized_end=27505
+  _globals['_POINTSTRUCT_PAYLOADENTRY']._serialized_start=4013
+  _globals['_POINTSTRUCT_PAYLOADENTRY']._serialized_end=4074
+  _globals['_GEOPOINT']._serialized_start=27507
+  _globals['_GEOPOINT']._serialized_end=27543
+  _globals['_USAGE']._serialized_start=27546
+  _globals['_USAGE']._serialized_end=27674
+  _globals['_INFERENCEUSAGE']._serialized_start=27677
+  _globals['_INFERENCEUSAGE']._serialized_end=27812
+  _globals['_INFERENCEUSAGE_MODELSENTRY']._serialized_start=27747
+  _globals['_INFERENCEUSAGE_MODELSENTRY']._serialized_end=27812
+  _globals['_MODELUSAGE']._serialized_start=27814
+  _globals['_MODELUSAGE']._serialized_end=27842
+  _globals['_HARDWAREUSAGE']._serialized_start=27845
+  _globals['_HARDWAREUSAGE']._serialized_end=28036
 # @@protoc_insertion_point(module_scope)
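The two blocks above are pure `protoc` output: the `_serialized_start`/`_serialized_end` values are byte offsets into the file's serialized `FileDescriptorProto`, so they shift wholesale whenever `points.proto` gains messages or fields (here: `AcornSearchParams`, `Rrf`, `update_filter`, `text_any`, and the usage fields). A minimal smoke check, assuming `qdrant-client` is installed with these regenerated modules — nothing in these blocks is ever hand-edited:

```python
# Assumption: qdrant-client installed with the regenerated grpc modules.
from qdrant_client.grpc import points_pb2

# New message types introduced by this regeneration should resolve by name
# through the file descriptor, offsets and all.
for name in ("AcornSearchParams", "Rrf"):
    descriptor = points_pb2.DESCRIPTOR.message_types_by_name[name]
    print(descriptor.full_name, [field.name for field in descriptor.fields])
```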
diff --git a/qdrant_client/grpc/points_pb2.pyi b/qdrant_client/grpc/points_pb2.pyi
index 445dedcc..5f36bb06 100644
--- a/qdrant_client/grpc/points_pb2.pyi
+++ b/qdrant_client/grpc/points_pb2.pyi
@@ -154,14 +154,14 @@ class _Fusion:
 class _FusionEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_Fusion.ValueType], builtins.type):  # noqa: F821
     DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
     RRF: _Fusion.ValueType  # 0
-    """Reciprocal Rank Fusion"""
+    """Reciprocal Rank Fusion (with default parameters)"""
     DBSF: _Fusion.ValueType  # 1
     """Distribution-Based Score Fusion"""
 
 class Fusion(_Fusion, metaclass=_FusionEnumTypeWrapper): ...
 
 RRF: Fusion.ValueType  # 0
-"""Reciprocal Rank Fusion"""
+"""Reciprocal Rank Fusion (with default parameters)"""
 DBSF: Fusion.ValueType  # 1
 """Distribution-Based Score Fusion"""
 global___Fusion = Fusion
@@ -415,8 +415,6 @@ class InferenceObject(google.protobuf.message.Message):
 global___InferenceObject = InferenceObject
 
 class Vector(google.protobuf.message.Message):
-    """Legacy vector format, which determines the vector type by the configuration of its fields."""
-
     DESCRIPTOR: google.protobuf.descriptor.Descriptor
 
     DATA_FIELD_NUMBER: builtins.int
@@ -623,15 +621,21 @@ class ShardKeySelector(google.protobuf.message.Message):
     DESCRIPTOR: google.protobuf.descriptor.Descriptor
 
     SHARD_KEYS_FIELD_NUMBER: builtins.int
+    FALLBACK_FIELD_NUMBER: builtins.int
     @property
     def shard_keys(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[collections_pb2.ShardKey]:
         """List of shard keys which should be used in the request"""
+    @property
+    def fallback(self) -> collections_pb2.ShardKey: ...
     def __init__(
         self,
         *,
         shard_keys: collections.abc.Iterable[collections_pb2.ShardKey] | None = ...,
+        fallback: collections_pb2.ShardKey | None = ...,
     ) -> None: ...
-    def ClearField(self, field_name: typing_extensions.Literal["shard_keys", b"shard_keys"]) -> None: ...
+    def HasField(self, field_name: typing_extensions.Literal["_fallback", b"_fallback", "fallback", b"fallback"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing_extensions.Literal["_fallback", b"_fallback", "fallback", b"fallback", "shard_keys", b"shard_keys"]) -> None: ...
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["_fallback", b"_fallback"]) -> typing_extensions.Literal["fallback"] | None: ...
 
 global___ShardKeySelector = ShardKeySelector
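The regenerated stub above adds an optional `fallback` shard key to `ShardKeySelector`. A construction sketch, assuming these stubs and a server version that understands the field (what the server does with the fallback key is not visible in this diff — only the message-level API is):

```python
# Assumption: qdrant-client with regenerated stubs; shard key names are
# illustrative only.
from qdrant_client import grpc as qgrpc

selector = qgrpc.ShardKeySelector(
    shard_keys=[qgrpc.ShardKey(keyword="eu-region")],
    fallback=qgrpc.ShardKey(keyword="default"),
)
assert selector.HasField("fallback")  # optional field, presence-tracked
```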
@@ -648,6 +652,7 @@ class UpsertPoints(google.protobuf.message.Message):
     POINTS_FIELD_NUMBER: builtins.int
     ORDERING_FIELD_NUMBER: builtins.int
     SHARD_KEY_SELECTOR_FIELD_NUMBER: builtins.int
+    UPDATE_FILTER_FIELD_NUMBER: builtins.int
     collection_name: builtins.str
     """name of the collection"""
     wait: builtins.bool
@@ -660,6 +665,9 @@ class UpsertPoints(google.protobuf.message.Message):
     @property
     def shard_key_selector(self) -> global___ShardKeySelector:
         """Option for custom sharding to specify used shard keys"""
+    @property
+    def update_filter(self) -> global___Filter:
+        """If specified, only points that match this filter will be updated, others will be inserted"""
     def __init__(
         self,
         *,
@@ -668,14 +676,17 @@ class UpsertPoints(google.protobuf.message.Message):
         points: collections.abc.Iterable[global___PointStruct] | None = ...,
         ordering: global___WriteOrdering | None = ...,
         shard_key_selector: global___ShardKeySelector | None = ...,
+        update_filter: global___Filter | None = ...,
     ) -> None: ...
-    def HasField(self, field_name: typing_extensions.Literal["_ordering", b"_ordering", "_shard_key_selector", b"_shard_key_selector", "_wait", b"_wait", "ordering", b"ordering", "shard_key_selector", b"shard_key_selector", "wait", b"wait"]) -> builtins.bool: ...
-    def ClearField(self, field_name: typing_extensions.Literal["_ordering", b"_ordering", "_shard_key_selector", b"_shard_key_selector", "_wait", b"_wait", "collection_name", b"collection_name", "ordering", b"ordering", "points", b"points", "shard_key_selector", b"shard_key_selector", "wait", b"wait"]) -> None: ...
+    def HasField(self, field_name: typing_extensions.Literal["_ordering", b"_ordering", "_shard_key_selector", b"_shard_key_selector", "_update_filter", b"_update_filter", "_wait", b"_wait", "ordering", b"ordering", "shard_key_selector", b"shard_key_selector", "update_filter", b"update_filter", "wait", b"wait"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing_extensions.Literal["_ordering", b"_ordering", "_shard_key_selector", b"_shard_key_selector", "_update_filter", b"_update_filter", "_wait", b"_wait", "collection_name", b"collection_name", "ordering", b"ordering", "points", b"points", "shard_key_selector", b"shard_key_selector", "update_filter", b"update_filter", "wait", b"wait"]) -> None: ...
     @typing.overload
     def WhichOneof(self, oneof_group: typing_extensions.Literal["_ordering", b"_ordering"]) -> typing_extensions.Literal["ordering"] | None: ...
     @typing.overload
     def WhichOneof(self, oneof_group: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector"]) -> typing_extensions.Literal["shard_key_selector"] | None: ...
     @typing.overload
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["_update_filter", b"_update_filter"]) -> typing_extensions.Literal["update_filter"] | None: ...
+    @typing.overload
     def WhichOneof(self, oneof_group: typing_extensions.Literal["_wait", b"_wait"]) -> typing_extensions.Literal["wait"] | None: ...
 
 global___UpsertPoints = UpsertPoints
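`UpsertPoints` (and the matching batch operation later in this diff) gains `update_filter`: per the field comment, points matching the filter are updated while the rest are inserted as new. A hedged sketch of a conditional upsert built from the raw gRPC messages — the collection, payload key, and values are made up for illustration, and a server new enough to honor the field is assumed:

```python
# Assumption: qdrant-client with regenerated stubs, hypothetical "documents"
# collection with a "status" keyword payload.
from qdrant_client import grpc as qgrpc

request = qgrpc.UpsertPoints(
    collection_name="documents",
    wait=True,
    points=[
        qgrpc.PointStruct(
            id=qgrpc.PointId(num=1),
            vectors=qgrpc.Vectors(vector=qgrpc.Vector(data=[0.1, 0.2, 0.3])),
        )
    ],
    # Only overwrite points still marked as drafts; anything else in the
    # request that has no match is inserted as a new point.
    update_filter=qgrpc.Filter(
        must=[
            qgrpc.Condition(
                field=qgrpc.FieldCondition(
                    key="status",
                    match=qgrpc.Match(keyword="draft"),
                )
            )
        ]
    ),
)
```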
@@ -782,6 +793,7 @@ class UpdatePointVectors(google.protobuf.message.Message):
     POINTS_FIELD_NUMBER: builtins.int
     ORDERING_FIELD_NUMBER: builtins.int
     SHARD_KEY_SELECTOR_FIELD_NUMBER: builtins.int
+    UPDATE_FILTER_FIELD_NUMBER: builtins.int
     collection_name: builtins.str
     """name of the collection"""
     wait: builtins.bool
@@ -795,6 +807,9 @@ class UpdatePointVectors(google.protobuf.message.Message):
     @property
     def shard_key_selector(self) -> global___ShardKeySelector:
         """Option for custom sharding to specify used shard keys"""
+    @property
+    def update_filter(self) -> global___Filter:
+        """If specified, only points that match this filter will be updated"""
     def __init__(
         self,
         *,
@@ -803,14 +818,17 @@ class UpdatePointVectors(google.protobuf.message.Message):
         points: collections.abc.Iterable[global___PointVectors] | None = ...,
         ordering: global___WriteOrdering | None = ...,
         shard_key_selector: global___ShardKeySelector | None = ...,
+        update_filter: global___Filter | None = ...,
     ) -> None: ...
-    def HasField(self, field_name: typing_extensions.Literal["_ordering", b"_ordering", "_shard_key_selector", b"_shard_key_selector", "_wait", b"_wait", "ordering", b"ordering", "shard_key_selector", b"shard_key_selector", "wait", b"wait"]) -> builtins.bool: ...
-    def ClearField(self, field_name: typing_extensions.Literal["_ordering", b"_ordering", "_shard_key_selector", b"_shard_key_selector", "_wait", b"_wait", "collection_name", b"collection_name", "ordering", b"ordering", "points", b"points", "shard_key_selector", b"shard_key_selector", "wait", b"wait"]) -> None: ...
+    def HasField(self, field_name: typing_extensions.Literal["_ordering", b"_ordering", "_shard_key_selector", b"_shard_key_selector", "_update_filter", b"_update_filter", "_wait", b"_wait", "ordering", b"ordering", "shard_key_selector", b"shard_key_selector", "update_filter", b"update_filter", "wait", b"wait"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing_extensions.Literal["_ordering", b"_ordering", "_shard_key_selector", b"_shard_key_selector", "_update_filter", b"_update_filter", "_wait", b"_wait", "collection_name", b"collection_name", "ordering", b"ordering", "points", b"points", "shard_key_selector", b"shard_key_selector", "update_filter", b"update_filter", "wait", b"wait"]) -> None: ...
     @typing.overload
     def WhichOneof(self, oneof_group: typing_extensions.Literal["_ordering", b"_ordering"]) -> typing_extensions.Literal["ordering"] | None: ...
     @typing.overload
     def WhichOneof(self, oneof_group: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector"]) -> typing_extensions.Literal["shard_key_selector"] | None: ...
     @typing.overload
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["_update_filter", b"_update_filter"]) -> typing_extensions.Literal["update_filter"] | None: ...
+    @typing.overload
     def WhichOneof(self, oneof_group: typing_extensions.Literal["_wait", b"_wait"]) -> typing_extensions.Literal["wait"] | None: ...
 
 global___UpdatePointVectors = UpdatePointVectors
@@ -1365,6 +1383,44 @@ class QuantizationSearchParams(google.protobuf.message.Message):
 
 global___QuantizationSearchParams = QuantizationSearchParams
 
+class AcornSearchParams(google.protobuf.message.Message):
+    DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+    ENABLE_FIELD_NUMBER: builtins.int
+    MAX_SELECTIVITY_FIELD_NUMBER: builtins.int
+    enable: builtins.bool
+    """
+    If true, then ACORN may be used for the HNSW search based on filters
+    selectivity.
+
+    Improves search recall for searches with multiple low-selectivity
+    payload filters, at cost of performance.
+    """
+    max_selectivity: builtins.float
+    """
+    Maximum selectivity of filters to enable ACORN.
+
+    If estimated filters selectivity is higher than this value,
+    ACORN will not be used. Selectivity is estimated as:
+    `estimated number of points satisfying the filters / total number of points`.
+
+    0.0 for never, 1.0 for always. Default is 0.4.
+    """
+    def __init__(
+        self,
+        *,
+        enable: builtins.bool | None = ...,
+        max_selectivity: builtins.float | None = ...,
+    ) -> None: ...
+    def HasField(self, field_name: typing_extensions.Literal["_enable", b"_enable", "_max_selectivity", b"_max_selectivity", "enable", b"enable", "max_selectivity", b"max_selectivity"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing_extensions.Literal["_enable", b"_enable", "_max_selectivity", b"_max_selectivity", "enable", b"enable", "max_selectivity", b"max_selectivity"]) -> None: ...
+    @typing.overload
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["_enable", b"_enable"]) -> typing_extensions.Literal["enable"] | None: ...
+    @typing.overload
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["_max_selectivity", b"_max_selectivity"]) -> typing_extensions.Literal["max_selectivity"] | None: ...
+
+global___AcornSearchParams = AcornSearchParams
+
 class SearchParams(google.protobuf.message.Message):
     DESCRIPTOR: google.protobuf.descriptor.Descriptor
 
@@ -1372,6 +1428,7 @@ class SearchParams(google.protobuf.message.Message):
     EXACT_FIELD_NUMBER: builtins.int
     QUANTIZATION_FIELD_NUMBER: builtins.int
     INDEXED_ONLY_FIELD_NUMBER: builtins.int
+    ACORN_FIELD_NUMBER: builtins.int
     hnsw_ef: builtins.int
     """
     Params relevant to HNSW index. Size of the beam in a beam-search.
@@ -1392,6 +1449,11 @@ class SearchParams(google.protobuf.message.Message):
     Using this option prevents slow searches in case of delayed index, but does not guarantee that all uploaded vectors will be included in search results
     """
+    @property
+    def acorn(self) -> global___AcornSearchParams:
+        """
+        ACORN search params
+        """
     def __init__(
         self,
         *,
@@ -1399,9 +1461,12 @@ class SearchParams(google.protobuf.message.Message):
         hnsw_ef: builtins.int | None = ...,
         exact: builtins.bool | None = ...,
         quantization: global___QuantizationSearchParams | None = ...,
         indexed_only: builtins.bool | None = ...,
+        acorn: global___AcornSearchParams | None = ...,
     ) -> None: ...
-    def HasField(self, field_name: typing_extensions.Literal["_exact", b"_exact", "_hnsw_ef", b"_hnsw_ef", "_indexed_only", b"_indexed_only", "_quantization", b"_quantization", "exact", b"exact", "hnsw_ef", b"hnsw_ef", "indexed_only", b"indexed_only", "quantization", b"quantization"]) -> builtins.bool: ...
-    def ClearField(self, field_name: typing_extensions.Literal["_exact", b"_exact", "_hnsw_ef", b"_hnsw_ef", "_indexed_only", b"_indexed_only", "_quantization", b"_quantization", "exact", b"exact", "hnsw_ef", b"hnsw_ef", "indexed_only", b"indexed_only", "quantization", b"quantization"]) -> None: ...
+    def HasField(self, field_name: typing_extensions.Literal["_acorn", b"_acorn", "_exact", b"_exact", "_hnsw_ef", b"_hnsw_ef", "_indexed_only", b"_indexed_only", "_quantization", b"_quantization", "acorn", b"acorn", "exact", b"exact", "hnsw_ef", b"hnsw_ef", "indexed_only", b"indexed_only", "quantization", b"quantization"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing_extensions.Literal["_acorn", b"_acorn", "_exact", b"_exact", "_hnsw_ef", b"_hnsw_ef", "_indexed_only", b"_indexed_only", "_quantization", b"_quantization", "acorn", b"acorn", "exact", b"exact", "hnsw_ef", b"hnsw_ef", "indexed_only", b"indexed_only", "quantization", b"quantization"]) -> None: ...
+    @typing.overload
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["_acorn", b"_acorn"]) -> typing_extensions.Literal["acorn"] | None: ...
     @typing.overload
     def WhichOneof(self, oneof_group: typing_extensions.Literal["_exact", b"_exact"]) -> typing_extensions.Literal["exact"] | None: ...
     @typing.overload
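`SearchParams` now nests the new `AcornSearchParams`. Per the docstrings above, ACORN trades some search speed for recall when several low-selectivity payload filters are combined, and is skipped once the estimated selectivity (matching points divided by total points) exceeds `max_selectivity` (default 0.4). A minimal sketch, assuming these stubs and a server build with ACORN support:

```python
# Assumption: qdrant-client with regenerated stubs; hnsw_ef value is
# illustrative.
from qdrant_client import grpc as qgrpc

params = qgrpc.SearchParams(
    hnsw_ef=128,
    acorn=qgrpc.AcornSearchParams(
        enable=True,
        max_selectivity=0.3,  # only engage ACORN for fairly selective filters
    ),
)
```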
@@ -2655,7 +2720,7 @@ class DecayParamsExpression(google.protobuf.message.Message):
     scale: builtins.float
     """The scale factor of the decay, in terms of `x`. Defaults to 1.0. Must be a non-zero positive number."""
     midpoint: builtins.float
-    """The midpoint of the decay. Defaults to 0.5. Output will be this value when `|x - target| == scale`."""
+    """The midpoint of the decay. Should be between 0 and 1. Defaults to 0.5. Output will be this value when `|x - target| == scale`."""
     def __init__(
         self,
         *,
@@ -2736,6 +2801,25 @@ class Mmr(google.protobuf.message.Message):
 
 global___Mmr = Mmr
 
+class Rrf(google.protobuf.message.Message):
+    """Parameterized reciprocal rank fusion"""
+
+    DESCRIPTOR: google.protobuf.descriptor.Descriptor
+
+    K_FIELD_NUMBER: builtins.int
+    k: builtins.int
+    """K parameter for reciprocal rank fusion"""
+    def __init__(
+        self,
+        *,
+        k: builtins.int | None = ...,
+    ) -> None: ...
+    def HasField(self, field_name: typing_extensions.Literal["_k", b"_k", "k", b"k"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing_extensions.Literal["_k", b"_k", "k", b"k"]) -> None: ...
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["_k", b"_k"]) -> typing_extensions.Literal["k"] | None: ...
+
+global___Rrf = Rrf
+
 class Query(google.protobuf.message.Message):
     DESCRIPTOR: google.protobuf.descriptor.Descriptor
 
@@ -2748,6 +2832,7 @@ class Query(google.protobuf.message.Message):
     SAMPLE_FIELD_NUMBER: builtins.int
     FORMULA_FIELD_NUMBER: builtins.int
     NEAREST_WITH_MMR_FIELD_NUMBER: builtins.int
+    RRF_FIELD_NUMBER: builtins.int
     @property
     def nearest(self) -> global___VectorInput:
         """Find the nearest neighbors to this vector."""
@@ -2773,6 +2858,9 @@ class Query(google.protobuf.message.Message):
     @property
     def nearest_with_mmr(self) -> global___NearestInputWithMmr:
         """Search nearest neighbors, but re-rank based on the Maximal Marginal Relevance algorithm."""
+    @property
+    def rrf(self) -> global___Rrf:
+        """Parameterized reciprocal rank fusion"""
     def __init__(
         self,
         *,
@@ -2785,10 +2873,11 @@ class Query(google.protobuf.message.Message):
         sample: global___Sample.ValueType = ...,
         formula: global___Formula | None = ...,
         nearest_with_mmr: global___NearestInputWithMmr | None = ...,
+        rrf: global___Rrf | None = ...,
     ) -> None: ...
-    def HasField(self, field_name: typing_extensions.Literal["context", b"context", "discover", b"discover", "formula", b"formula", "fusion", b"fusion", "nearest", b"nearest", "nearest_with_mmr", b"nearest_with_mmr", "order_by", b"order_by", "recommend", b"recommend", "sample", b"sample", "variant", b"variant"]) -> builtins.bool: ...
-    def ClearField(self, field_name: typing_extensions.Literal["context", b"context", "discover", b"discover", "formula", b"formula", "fusion", b"fusion", "nearest", b"nearest", "nearest_with_mmr", b"nearest_with_mmr", "order_by", b"order_by", "recommend", b"recommend", "sample", b"sample", "variant", b"variant"]) -> None: ...
-    def WhichOneof(self, oneof_group: typing_extensions.Literal["variant", b"variant"]) -> typing_extensions.Literal["nearest", "recommend", "discover", "context", "order_by", "fusion", "sample", "formula", "nearest_with_mmr"] | None: ...
+    def HasField(self, field_name: typing_extensions.Literal["context", b"context", "discover", b"discover", "formula", b"formula", "fusion", b"fusion", "nearest", b"nearest", "nearest_with_mmr", b"nearest_with_mmr", "order_by", b"order_by", "recommend", b"recommend", "rrf", b"rrf", "sample", b"sample", "variant", b"variant"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing_extensions.Literal["context", b"context", "discover", b"discover", "formula", b"formula", "fusion", b"fusion", "nearest", b"nearest", "nearest_with_mmr", b"nearest_with_mmr", "order_by", b"order_by", "recommend", b"recommend", "rrf", b"rrf", "sample", b"sample", "variant", b"variant"]) -> None: ...
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["variant", b"variant"]) -> typing_extensions.Literal["nearest", "recommend", "discover", "context", "order_by", "fusion", "sample", "formula", "nearest_with_mmr", "rrf"] | None: ...
 
 global___Query = Query
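`Query` gains an `rrf` variant backed by the new `Rrf` message, which complements the bare `Fusion.RRF` enum value (now documented as "with default parameters") by exposing the smoothing constant `k`; in the usual RRF formulation, a point's fused score is the sum over the prefetch rankings of `1 / (k + rank)`. A hybrid-search sketch under assumed named vectors `dense` and `sparse`:

```python
# Assumption: qdrant-client with regenerated stubs, a "documents" collection
# configured with "dense" and "sparse" named vectors; query vectors are toys.
from qdrant_client import grpc as qgrpc

request = qgrpc.QueryPoints(
    collection_name="documents",
    prefetch=[
        qgrpc.PrefetchQuery(
            using="dense",
            query=qgrpc.Query(
                nearest=qgrpc.VectorInput(dense=qgrpc.DenseVector(data=[0.1, 0.2, 0.3]))
            ),
        ),
        qgrpc.PrefetchQuery(
            using="sparse",
            query=qgrpc.Query(
                nearest=qgrpc.VectorInput(
                    sparse=qgrpc.SparseVector(values=[0.7, 0.4], indices=[12, 301])
                )
            ),
        ),
    ],
    # Fuse the two prefetch rankings with an explicit smoothing constant.
    query=qgrpc.Query(rrf=qgrpc.Rrf(k=60)),
    limit=10,
)
```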
@@ -3357,20 +3446,28 @@ class PointsUpdateOperation(google.protobuf.message.Message):
 
         POINTS_FIELD_NUMBER: builtins.int
         SHARD_KEY_SELECTOR_FIELD_NUMBER: builtins.int
+        UPDATE_FILTER_FIELD_NUMBER: builtins.int
         @property
         def points(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___PointStruct]: ...
         @property
         def shard_key_selector(self) -> global___ShardKeySelector:
             """Option for custom sharding to specify used shard keys"""
+        @property
+        def update_filter(self) -> global___Filter:
+            """If specified, only points that match this filter will be updated, others will be inserted"""
         def __init__(
             self,
             *,
             points: collections.abc.Iterable[global___PointStruct] | None = ...,
             shard_key_selector: global___ShardKeySelector | None = ...,
+            update_filter: global___Filter | None = ...,
         ) -> None: ...
-        def HasField(self, field_name: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector", "shard_key_selector", b"shard_key_selector"]) -> builtins.bool: ...
-        def ClearField(self, field_name: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector", "points", b"points", "shard_key_selector", b"shard_key_selector"]) -> None: ...
+        def HasField(self, field_name: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector", "_update_filter", b"_update_filter", "shard_key_selector", b"shard_key_selector", "update_filter", b"update_filter"]) -> builtins.bool: ...
+        def ClearField(self, field_name: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector", "_update_filter", b"_update_filter", "points", b"points", "shard_key_selector", b"shard_key_selector", "update_filter", b"update_filter"]) -> None: ...
+        @typing.overload
         def WhichOneof(self, oneof_group: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector"]) -> typing_extensions.Literal["shard_key_selector"] | None: ...
+        @typing.overload
+        def WhichOneof(self, oneof_group: typing_extensions.Literal["_update_filter", b"_update_filter"]) -> typing_extensions.Literal["update_filter"] | None: ...
 
     class SetPayload(google.protobuf.message.Message):
         DESCRIPTOR: google.protobuf.descriptor.Descriptor
@@ -3507,21 +3604,29 @@ class PointsUpdateOperation(google.protobuf.message.Message):
 
         POINTS_FIELD_NUMBER: builtins.int
         SHARD_KEY_SELECTOR_FIELD_NUMBER: builtins.int
+        UPDATE_FILTER_FIELD_NUMBER: builtins.int
         @property
         def points(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___PointVectors]:
             """List of points and vectors to update"""
         @property
         def shard_key_selector(self) -> global___ShardKeySelector:
             """Option for custom sharding to specify used shard keys"""
+        @property
+        def update_filter(self) -> global___Filter:
+            """If specified, only points that match this filter will be updated"""
         def __init__(
             self,
             *,
             points: collections.abc.Iterable[global___PointVectors] | None = ...,
             shard_key_selector: global___ShardKeySelector | None = ...,
+            update_filter: global___Filter | None = ...,
         ) -> None: ...
-        def HasField(self, field_name: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector", "shard_key_selector", b"shard_key_selector"]) -> builtins.bool: ...
-        def ClearField(self, field_name: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector", "points", b"points", "shard_key_selector", b"shard_key_selector"]) -> None: ...
+        def HasField(self, field_name: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector", "_update_filter", b"_update_filter", "shard_key_selector", b"shard_key_selector", "update_filter", b"update_filter"]) -> builtins.bool: ...
+        def ClearField(self, field_name: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector", "_update_filter", b"_update_filter", "points", b"points", "shard_key_selector", b"shard_key_selector", "update_filter", b"update_filter"]) -> None: ...
+        @typing.overload
         def WhichOneof(self, oneof_group: typing_extensions.Literal["_shard_key_selector", b"_shard_key_selector"]) -> typing_extensions.Literal["shard_key_selector"] | None: ...
+        @typing.overload
+        def WhichOneof(self, oneof_group: typing_extensions.Literal["_update_filter", b"_update_filter"]) -> typing_extensions.Literal["update_filter"] | None: ...
 
     class DeleteVectors(google.protobuf.message.Message):
         DESCRIPTOR: google.protobuf.descriptor.Descriptor
@@ -4354,17 +4459,23 @@ class FacetResponse(google.protobuf.message.Message):
 
     HITS_FIELD_NUMBER: builtins.int
     TIME_FIELD_NUMBER: builtins.int
+    USAGE_FIELD_NUMBER: builtins.int
    @property
     def hits(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___FacetHit]: ...
     time: builtins.float
     """Time spent to process"""
+    @property
+    def usage(self) -> global___Usage: ...
     def __init__(
         self,
         *,
         hits: collections.abc.Iterable[global___FacetHit] | None = ...,
         time: builtins.float = ...,
+        usage: global___Usage | None = ...,
     ) -> None: ...
-    def ClearField(self, field_name: typing_extensions.Literal["hits", b"hits", "time", b"time"]) -> None: ...
+    def HasField(self, field_name: typing_extensions.Literal["_usage", b"_usage", "usage", b"usage"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing_extensions.Literal["_usage", b"_usage", "hits", b"hits", "time", b"time", "usage", b"usage"]) -> None: ...
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["_usage", b"_usage"]) -> typing_extensions.Literal["usage"] | None: ...
 
 global___FacetResponse = FacetResponse
@@ -4667,6 +4778,7 @@ class Match(google.protobuf.message.Message):
     EXCEPT_INTEGERS_FIELD_NUMBER: builtins.int
     EXCEPT_KEYWORDS_FIELD_NUMBER: builtins.int
     PHRASE_FIELD_NUMBER: builtins.int
+    TEXT_ANY_FIELD_NUMBER: builtins.int
     keyword: builtins.str
     """Match string keyword"""
     integer: builtins.int
@@ -4689,6 +4801,8 @@ class Match(google.protobuf.message.Message):
     """Match any other value except those keywords"""
     phrase: builtins.str
     """Match phrase text"""
+    text_any: builtins.str
+    """Match any word in the text"""
     def __init__(
         self,
         *,
@@ -4701,10 +4815,11 @@ class Match(google.protobuf.message.Message):
         except_integers: global___RepeatedIntegers | None = ...,
         except_keywords: global___RepeatedStrings | None = ...,
         phrase: builtins.str = ...,
+        text_any: builtins.str = ...,
     ) -> None: ...
-    def HasField(self, field_name: typing_extensions.Literal["boolean", b"boolean", "except_integers", b"except_integers", "except_keywords", b"except_keywords", "integer", b"integer", "integers", b"integers", "keyword", b"keyword", "keywords", b"keywords", "match_value", b"match_value", "phrase", b"phrase", "text", b"text"]) -> builtins.bool: ...
-    def ClearField(self, field_name: typing_extensions.Literal["boolean", b"boolean", "except_integers", b"except_integers", "except_keywords", b"except_keywords", "integer", b"integer", "integers", b"integers", "keyword", b"keyword", "keywords", b"keywords", "match_value", b"match_value", "phrase", b"phrase", "text", b"text"]) -> None: ...
-    def WhichOneof(self, oneof_group: typing_extensions.Literal["match_value", b"match_value"]) -> typing_extensions.Literal["keyword", "integer", "boolean", "text", "keywords", "integers", "except_integers", "except_keywords", "phrase"] | None: ...
+    def HasField(self, field_name: typing_extensions.Literal["boolean", b"boolean", "except_integers", b"except_integers", "except_keywords", b"except_keywords", "integer", b"integer", "integers", b"integers", "keyword", b"keyword", "keywords", b"keywords", "match_value", b"match_value", "phrase", b"phrase", "text", b"text", "text_any", b"text_any"]) -> builtins.bool: ...
+    def ClearField(self, field_name: typing_extensions.Literal["boolean", b"boolean", "except_integers", b"except_integers", "except_keywords", b"except_keywords", "integer", b"integer", "integers", b"integers", "keyword", b"keyword", "keywords", b"keywords", "match_value", b"match_value", "phrase", b"phrase", "text", b"text", "text_any", b"text_any"]) -> None: ...
+    def WhichOneof(self, oneof_group: typing_extensions.Literal["match_value", b"match_value"]) -> typing_extensions.Literal["keyword", "integer", "boolean", "text", "keywords", "integers", "except_integers", "except_keywords", "phrase", "text_any"] | None: ...
 
 global___Match = Match
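`Match` gains a `text_any` variant which, per its comment, matches a point when any word of the query text occurs in the indexed field (the existing `text` and `phrase` variants remain for the stricter matching modes). A construction sketch on a hypothetical `description` full-text index:

```python
# Assumption: qdrant-client with regenerated stubs; "description" is a
# made-up payload field with a full-text index.
from qdrant_client import grpc as qgrpc

condition = qgrpc.Condition(
    field=qgrpc.FieldCondition(
        key="description",
        # Matches points whose description contains "wireless", "noise",
        # or "cancelling" -- any one word is enough.
        match=qgrpc.Match(text_any="wireless noise cancelling"),
    )
)
```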
diff --git a/qdrant_client/grpc/points_service_pb2.py b/qdrant_client/grpc/points_service_pb2.py
index 22dbd507..853e67cd 100644
--- a/qdrant_client/grpc/points_service_pb2.py
+++ b/qdrant_client/grpc/points_service_pb2.py
@@ -1,12 +1,12 @@
 # -*- coding: utf-8 -*-
 # Generated by the protocol buffer compiler.  DO NOT EDIT!
 # source: points_service.proto
+# Protobuf Python Version: 4.25.1
 """Generated protocol buffer code."""
 from google.protobuf import descriptor as _descriptor
 from google.protobuf import descriptor_pool as _descriptor_pool
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
 from google.protobuf import symbol_database as _symbol_database
+from google.protobuf.internal import builder as _builder
 # @@protoc_insertion_point(imports)
 
 _sym_db = _symbol_database.Default()
@@ -17,13 +17,12 @@
 
 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x14points_service.proto\x12\x06qdrant\x1a\x0cpoints.proto2\xfd\x0f\n\x06Points\x12\x41\n\x06Upsert\x12\x14.qdrant.UpsertPoints\x1a\x1f.qdrant.PointsOperationResponse\"\x00\x12\x41\n\x06\x44\x65lete\x12\x14.qdrant.DeletePoints\x1a\x1f.qdrant.PointsOperationResponse\"\x00\x12/\n\x03Get\x12\x11.qdrant.GetPoints\x1a\x13.qdrant.GetResponse\"\x00\x12N\n\rUpdateVectors\x12\x1a.qdrant.UpdatePointVectors\x1a\x1f.qdrant.PointsOperationResponse\"\x00\x12N\n\rDeleteVectors\x12\x1a.qdrant.DeletePointVectors\x1a\x1f.qdrant.PointsOperationResponse\"\x00\x12I\n\nSetPayload\x12\x18.qdrant.SetPayloadPoints\x1a\x1f.qdrant.PointsOperationResponse\"\x00\x12O\n\x10OverwritePayload\x12\x18.qdrant.SetPayloadPoints\x1a\x1f.qdrant.PointsOperationResponse\"\x00\x12O\n\rDeletePayload\x12\x1b.qdrant.DeletePayloadPoints\x1a\x1f.qdrant.PointsOperationResponse\"\x00\x12M\n\x0c\x43learPayload\x12\x1a.qdrant.ClearPayloadPoints\x1a\x1f.qdrant.PointsOperationResponse\"\x00\x12Y\n\x10\x43reateFieldIndex\x12\".qdrant.CreateFieldIndexCollection\x1a\x1f.qdrant.PointsOperationResponse\"\x00\x12Y\n\x10\x44\x65leteFieldIndex\x12\".qdrant.DeleteFieldIndexCollection\x1a\x1f.qdrant.PointsOperationResponse\"\x00\x12\x38\n\x06Search\x12\x14.qdrant.SearchPoints\x1a\x16.qdrant.SearchResponse\"\x00\x12G\n\x0bSearchBatch\x12\x19.qdrant.SearchBatchPoints\x1a\x1b.qdrant.SearchBatchResponse\"\x00\x12I\n\x0cSearchGroups\x12\x19.qdrant.SearchPointGroups\x1a\x1c.qdrant.SearchGroupsResponse\"\x00\x12\x38\n\x06Scroll\x12\x14.qdrant.ScrollPoints\x1a\x16.qdrant.ScrollResponse\"\x00\x12\x41\n\tRecommend\x12\x17.qdrant.RecommendPoints\x1a\x19.qdrant.RecommendResponse\"\x00\x12P\n\x0eRecommendBatch\x12\x1c.qdrant.RecommendBatchPoints\x1a\x1e.qdrant.RecommendBatchResponse\"\x00\x12R\n\x0fRecommendGroups\x12\x1c.qdrant.RecommendPointGroups\x1a\x1f.qdrant.RecommendGroupsResponse\"\x00\x12>\n\x08\x44iscover\x12\x16.qdrant.DiscoverPoints\x1a\x18.qdrant.DiscoverResponse\"\x00\x12M\n\rDiscoverBatch\x12\x1b.qdrant.DiscoverBatchPoints\x1a\x1d.qdrant.DiscoverBatchResponse\"\x00\x12\x35\n\x05\x43ount\x12\x13.qdrant.CountPoints\x1a\x15.qdrant.CountResponse\"\x00\x12G\n\x0bUpdateBatch\x12\x19.qdrant.UpdateBatchPoints\x1a\x1b.qdrant.UpdateBatchResponse\"\x00\x12\x35\n\x05Query\x12\x13.qdrant.QueryPoints\x1a\x15.qdrant.QueryResponse\"\x00\x12\x44\n\nQueryBatch\x12\x18.qdrant.QueryBatchPoints\x1a\x1a.qdrant.QueryBatchResponse\"\x00\x12\x46\n\x0bQueryGroups\x12\x18.qdrant.QueryPointGroups\x1a\x1b.qdrant.QueryGroupsResponse\"\x00\x12\x35\n\x05\x46\x61\x63\x65t\x12\x13.qdrant.FacetCounts\x1a\x15.qdrant.FacetResponse\"\x00\x12T\n\x11SearchMatrixPairs\x12\x1a.qdrant.SearchMatrixPoints\x1a!.qdrant.SearchMatrixPairsResponse\"\x00\x12X\n\x13SearchMatrixOffsets\x12\x1a.qdrant.SearchMatrixPoints\x1a#.qdrant.SearchMatrixOffsetsResponse\"\x00\x42\x15\xaa\x02\x12Qdrant.Client.Grpcb\x06proto3')
 
-
-_POINTS = DESCRIPTOR.services_by_name['Points']
+_globals = globals()
+_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
+_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'points_service_pb2', _globals)
 if _descriptor._USE_C_DESCRIPTORS == False:
-
-  DESCRIPTOR._options = None
-  DESCRIPTOR._serialized_options = b'\252\002\022Qdrant.Client.Grpc'
-  _POINTS._serialized_start=47
-  _POINTS._serialized_end=2092
+  _globals['DESCRIPTOR']._options = None
+  _globals['DESCRIPTOR']._serialized_options = b'\252\002\022Qdrant.Client.Grpc'
+  _globals['_POINTS']._serialized_start=47
+  _globals['_POINTS']._serialized_end=2092
 # @@protoc_insertion_point(module_scope)
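This service module — and the two that follow — undergo the same mechanical migration already seen in `points_pb2.py`: the per-message `_reflection.GeneratedProtocolMessageType(...)` / `_sym_db.RegisterMessage(...)` boilerplate is replaced by the `_builder` API that protobuf 4.x codegen emits, with all descriptors materialized into `_globals`. The public surface is unchanged; a quick check, assuming the package is installed:

```python
# Assumption: qdrant-client installed with the regenerated grpc modules.
from qdrant_client.grpc import points_service_pb2

# The service descriptor previously bound to the module-level _POINTS name
# is now materialized by the builder, but remains reachable the usual way:
points_service = points_service_pb2.DESCRIPTOR.services_by_name["Points"]
print(points_service.full_name, len(points_service.methods))
```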
_globals['_HEALTHCHECKREPLY']._serialized_start=120 + _globals['_HEALTHCHECKREPLY']._serialized_end=202 + _globals['_QDRANT']._serialized_start=204 + _globals['_QDRANT']._serialized_end=283 # @@protoc_insertion_point(module_scope) diff --git a/qdrant_client/grpc/snapshots_service_pb2.py b/qdrant_client/grpc/snapshots_service_pb2.py index 39655c3c..0d2e21fb 100644 --- a/qdrant_client/grpc/snapshots_service_pb2.py +++ b/qdrant_client/grpc/snapshots_service_pb2.py @@ -1,12 +1,12 @@ # -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: snapshots_service.proto +# Protobuf Python Version: 4.25.1 """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() @@ -17,113 +17,32 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17snapshots_service.proto\x12\x06qdrant\x1a\x1fgoogle/protobuf/timestamp.proto\"\x1b\n\x19\x43reateFullSnapshotRequest\"\x1a\n\x18ListFullSnapshotsRequest\"2\n\x19\x44\x65leteFullSnapshotRequest\x12\x15\n\rsnapshot_name\x18\x01 \x01(\t\"0\n\x15\x43reateSnapshotRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\"/\n\x14ListSnapshotsRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\"G\n\x15\x44\x65leteSnapshotRequest\x12\x17\n\x0f\x63ollection_name\x18\x01 \x01(\t\x12\x15\n\rsnapshot_name\x18\x02 \x01(\t\"\x88\x01\n\x13SnapshotDescription\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\rcreation_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04size\x18\x03 \x01(\x03\x12\x15\n\x08\x63hecksum\x18\x04 \x01(\tH\x00\x88\x01\x01\x42\x0b\n\t_checksum\"a\n\x16\x43reateSnapshotResponse\x12\x39\n\x14snapshot_description\x18\x01 \x01(\x0b\x32\x1b.qdrant.SnapshotDescription\x12\x0c\n\x04time\x18\x02 \x01(\x01\"a\n\x15ListSnapshotsResponse\x12:\n\x15snapshot_descriptions\x18\x01 \x03(\x0b\x32\x1b.qdrant.SnapshotDescription\x12\x0c\n\x04time\x18\x02 \x01(\x01\"&\n\x16\x44\x65leteSnapshotResponse\x12\x0c\n\x04time\x18\x01 \x01(\x01\x32\xdd\x03\n\tSnapshots\x12I\n\x06\x43reate\x12\x1d.qdrant.CreateSnapshotRequest\x1a\x1e.qdrant.CreateSnapshotResponse\"\x00\x12\x45\n\x04List\x12\x1c.qdrant.ListSnapshotsRequest\x1a\x1d.qdrant.ListSnapshotsResponse\"\x00\x12I\n\x06\x44\x65lete\x12\x1d.qdrant.DeleteSnapshotRequest\x1a\x1e.qdrant.DeleteSnapshotResponse\"\x00\x12Q\n\nCreateFull\x12!.qdrant.CreateFullSnapshotRequest\x1a\x1e.qdrant.CreateSnapshotResponse\"\x00\x12M\n\x08ListFull\x12 .qdrant.ListFullSnapshotsRequest\x1a\x1d.qdrant.ListSnapshotsResponse\"\x00\x12Q\n\nDeleteFull\x12!.qdrant.DeleteFullSnapshotRequest\x1a\x1e.qdrant.DeleteSnapshotResponse\"\x00\x42\x15\xaa\x02\x12Qdrant.Client.Grpcb\x06proto3') - - -_CREATEFULLSNAPSHOTREQUEST = DESCRIPTOR.message_types_by_name['CreateFullSnapshotRequest'] -_LISTFULLSNAPSHOTSREQUEST = DESCRIPTOR.message_types_by_name['ListFullSnapshotsRequest'] -_DELETEFULLSNAPSHOTREQUEST = DESCRIPTOR.message_types_by_name['DeleteFullSnapshotRequest'] -_CREATESNAPSHOTREQUEST = DESCRIPTOR.message_types_by_name['CreateSnapshotRequest'] -_LISTSNAPSHOTSREQUEST = DESCRIPTOR.message_types_by_name['ListSnapshotsRequest'] -_DELETESNAPSHOTREQUEST = DESCRIPTOR.message_types_by_name['DeleteSnapshotRequest'] 
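# NOTE (annotation, not part of the generated code): the *_pb2 rewrites in this
# patch swap the legacy _reflection/_message bootstrap for the builder API that
# protoc emits for protobuf >= 4.x. Message classes are now materialized into
# module globals via _builder.BuildTopDescriptorsAndMessages() instead of
# explicit GeneratedProtocolMessageType calls; the public classes are unchanged.
# A minimal sanity check, assuming qdrant-client with a protobuf 4.25.x runtime
# is installed (names taken from the qdrant_pb2 hunk above):
from qdrant_client.grpc import qdrant_pb2

reply = qdrant_pb2.HealthCheckReply(title="qdrant", version="1.0.0")  # constructed exactly as before
assert qdrant_pb2.HealthCheckReply.FromString(reply.SerializeToString()) == reply  # wire format unchanged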
-_SNAPSHOTDESCRIPTION = DESCRIPTOR.message_types_by_name['SnapshotDescription'] -_CREATESNAPSHOTRESPONSE = DESCRIPTOR.message_types_by_name['CreateSnapshotResponse'] -_LISTSNAPSHOTSRESPONSE = DESCRIPTOR.message_types_by_name['ListSnapshotsResponse'] -_DELETESNAPSHOTRESPONSE = DESCRIPTOR.message_types_by_name['DeleteSnapshotResponse'] -CreateFullSnapshotRequest = _reflection.GeneratedProtocolMessageType('CreateFullSnapshotRequest', (_message.Message,), { - 'DESCRIPTOR' : _CREATEFULLSNAPSHOTREQUEST, - '__module__' : 'snapshots_service_pb2' - # @@protoc_insertion_point(class_scope:qdrant.CreateFullSnapshotRequest) - }) -_sym_db.RegisterMessage(CreateFullSnapshotRequest) - -ListFullSnapshotsRequest = _reflection.GeneratedProtocolMessageType('ListFullSnapshotsRequest', (_message.Message,), { - 'DESCRIPTOR' : _LISTFULLSNAPSHOTSREQUEST, - '__module__' : 'snapshots_service_pb2' - # @@protoc_insertion_point(class_scope:qdrant.ListFullSnapshotsRequest) - }) -_sym_db.RegisterMessage(ListFullSnapshotsRequest) - -DeleteFullSnapshotRequest = _reflection.GeneratedProtocolMessageType('DeleteFullSnapshotRequest', (_message.Message,), { - 'DESCRIPTOR' : _DELETEFULLSNAPSHOTREQUEST, - '__module__' : 'snapshots_service_pb2' - # @@protoc_insertion_point(class_scope:qdrant.DeleteFullSnapshotRequest) - }) -_sym_db.RegisterMessage(DeleteFullSnapshotRequest) - -CreateSnapshotRequest = _reflection.GeneratedProtocolMessageType('CreateSnapshotRequest', (_message.Message,), { - 'DESCRIPTOR' : _CREATESNAPSHOTREQUEST, - '__module__' : 'snapshots_service_pb2' - # @@protoc_insertion_point(class_scope:qdrant.CreateSnapshotRequest) - }) -_sym_db.RegisterMessage(CreateSnapshotRequest) - -ListSnapshotsRequest = _reflection.GeneratedProtocolMessageType('ListSnapshotsRequest', (_message.Message,), { - 'DESCRIPTOR' : _LISTSNAPSHOTSREQUEST, - '__module__' : 'snapshots_service_pb2' - # @@protoc_insertion_point(class_scope:qdrant.ListSnapshotsRequest) - }) -_sym_db.RegisterMessage(ListSnapshotsRequest) - -DeleteSnapshotRequest = _reflection.GeneratedProtocolMessageType('DeleteSnapshotRequest', (_message.Message,), { - 'DESCRIPTOR' : _DELETESNAPSHOTREQUEST, - '__module__' : 'snapshots_service_pb2' - # @@protoc_insertion_point(class_scope:qdrant.DeleteSnapshotRequest) - }) -_sym_db.RegisterMessage(DeleteSnapshotRequest) - -SnapshotDescription = _reflection.GeneratedProtocolMessageType('SnapshotDescription', (_message.Message,), { - 'DESCRIPTOR' : _SNAPSHOTDESCRIPTION, - '__module__' : 'snapshots_service_pb2' - # @@protoc_insertion_point(class_scope:qdrant.SnapshotDescription) - }) -_sym_db.RegisterMessage(SnapshotDescription) - -CreateSnapshotResponse = _reflection.GeneratedProtocolMessageType('CreateSnapshotResponse', (_message.Message,), { - 'DESCRIPTOR' : _CREATESNAPSHOTRESPONSE, - '__module__' : 'snapshots_service_pb2' - # @@protoc_insertion_point(class_scope:qdrant.CreateSnapshotResponse) - }) -_sym_db.RegisterMessage(CreateSnapshotResponse) - -ListSnapshotsResponse = _reflection.GeneratedProtocolMessageType('ListSnapshotsResponse', (_message.Message,), { - 'DESCRIPTOR' : _LISTSNAPSHOTSRESPONSE, - '__module__' : 'snapshots_service_pb2' - # @@protoc_insertion_point(class_scope:qdrant.ListSnapshotsResponse) - }) -_sym_db.RegisterMessage(ListSnapshotsResponse) - -DeleteSnapshotResponse = _reflection.GeneratedProtocolMessageType('DeleteSnapshotResponse', (_message.Message,), { - 'DESCRIPTOR' : _DELETESNAPSHOTRESPONSE, - '__module__' : 'snapshots_service_pb2' - # @@protoc_insertion_point(class_scope:qdrant.DeleteSnapshotResponse) - }) 
-_sym_db.RegisterMessage(DeleteSnapshotResponse) - -_SNAPSHOTS = DESCRIPTOR.services_by_name['Snapshots'] +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'snapshots_service_pb2', _globals) if _descriptor._USE_C_DESCRIPTORS == False: - - DESCRIPTOR._options = None - DESCRIPTOR._serialized_options = b'\252\002\022Qdrant.Client.Grpc' - _CREATEFULLSNAPSHOTREQUEST._serialized_start=68 - _CREATEFULLSNAPSHOTREQUEST._serialized_end=95 - _LISTFULLSNAPSHOTSREQUEST._serialized_start=97 - _LISTFULLSNAPSHOTSREQUEST._serialized_end=123 - _DELETEFULLSNAPSHOTREQUEST._serialized_start=125 - _DELETEFULLSNAPSHOTREQUEST._serialized_end=175 - _CREATESNAPSHOTREQUEST._serialized_start=177 - _CREATESNAPSHOTREQUEST._serialized_end=225 - _LISTSNAPSHOTSREQUEST._serialized_start=227 - _LISTSNAPSHOTSREQUEST._serialized_end=274 - _DELETESNAPSHOTREQUEST._serialized_start=276 - _DELETESNAPSHOTREQUEST._serialized_end=347 - _SNAPSHOTDESCRIPTION._serialized_start=350 - _SNAPSHOTDESCRIPTION._serialized_end=486 - _CREATESNAPSHOTRESPONSE._serialized_start=488 - _CREATESNAPSHOTRESPONSE._serialized_end=585 - _LISTSNAPSHOTSRESPONSE._serialized_start=587 - _LISTSNAPSHOTSRESPONSE._serialized_end=684 - _DELETESNAPSHOTRESPONSE._serialized_start=686 - _DELETESNAPSHOTRESPONSE._serialized_end=724 - _SNAPSHOTS._serialized_start=727 - _SNAPSHOTS._serialized_end=1204 + _globals['DESCRIPTOR']._options = None + _globals['DESCRIPTOR']._serialized_options = b'\252\002\022Qdrant.Client.Grpc' + _globals['_CREATEFULLSNAPSHOTREQUEST']._serialized_start=68 + _globals['_CREATEFULLSNAPSHOTREQUEST']._serialized_end=95 + _globals['_LISTFULLSNAPSHOTSREQUEST']._serialized_start=97 + _globals['_LISTFULLSNAPSHOTSREQUEST']._serialized_end=123 + _globals['_DELETEFULLSNAPSHOTREQUEST']._serialized_start=125 + _globals['_DELETEFULLSNAPSHOTREQUEST']._serialized_end=175 + _globals['_CREATESNAPSHOTREQUEST']._serialized_start=177 + _globals['_CREATESNAPSHOTREQUEST']._serialized_end=225 + _globals['_LISTSNAPSHOTSREQUEST']._serialized_start=227 + _globals['_LISTSNAPSHOTSREQUEST']._serialized_end=274 + _globals['_DELETESNAPSHOTREQUEST']._serialized_start=276 + _globals['_DELETESNAPSHOTREQUEST']._serialized_end=347 + _globals['_SNAPSHOTDESCRIPTION']._serialized_start=350 + _globals['_SNAPSHOTDESCRIPTION']._serialized_end=486 + _globals['_CREATESNAPSHOTRESPONSE']._serialized_start=488 + _globals['_CREATESNAPSHOTRESPONSE']._serialized_end=585 + _globals['_LISTSNAPSHOTSRESPONSE']._serialized_start=587 + _globals['_LISTSNAPSHOTSRESPONSE']._serialized_end=684 + _globals['_DELETESNAPSHOTRESPONSE']._serialized_start=686 + _globals['_DELETESNAPSHOTRESPONSE']._serialized_end=724 + _globals['_SNAPSHOTS']._serialized_start=727 + _globals['_SNAPSHOTS']._serialized_end=1204 # @@protoc_insertion_point(module_scope) diff --git a/qdrant_client/http/api/aliases_api.py b/qdrant_client/http/api/aliases_api.py index 69f1c6a4..14dc7814 100644 --- a/qdrant_client/http/api/aliases_api.py +++ b/qdrant_client/http/api/aliases_api.py @@ -65,7 +65,7 @@ def _build_for_get_collection_aliases( headers = {} return self.api_client.request( - type_=m.InlineResponse2009, + type_=m.InlineResponse2008, method="GET", url="/collections/{collection_name}/aliases", headers=headers if headers else None, @@ -80,7 +80,7 @@ def _build_for_get_collections_aliases( """ headers = {} return self.api_client.request( - type_=m.InlineResponse2009, + type_=m.InlineResponse2008, method="GET", url="/aliases", headers=headers if 
headers else None, @@ -113,7 +113,7 @@ class AsyncAliasesApi(_AliasesApi): async def get_collection_aliases( self, collection_name: str, - ) -> m.InlineResponse2009: + ) -> m.InlineResponse2008: """ Get list of all aliases for a collection """ @@ -123,7 +123,7 @@ async def get_collection_aliases( async def get_collections_aliases( self, - ) -> m.InlineResponse2009: + ) -> m.InlineResponse2008: """ Get list of all existing collections aliases """ @@ -144,7 +144,7 @@ class SyncAliasesApi(_AliasesApi): def get_collection_aliases( self, collection_name: str, - ) -> m.InlineResponse2009: + ) -> m.InlineResponse2008: """ Get list of all aliases for a collection """ @@ -154,7 +154,7 @@ def get_collection_aliases( def get_collections_aliases( self, - ) -> m.InlineResponse2009: + ) -> m.InlineResponse2008: """ Get list of all existing collections aliases """ diff --git a/qdrant_client/http/api/collections_api.py b/qdrant_client/http/api/collections_api.py index 52c155d0..fc4cb321 100644 --- a/qdrant_client/http/api/collections_api.py +++ b/qdrant_client/http/api/collections_api.py @@ -65,7 +65,7 @@ def _build_for_collection_exists( headers = {} return self.api_client.request( - type_=m.InlineResponse2007, + type_=m.InlineResponse2006, method="GET", url="/collections/{collection_name}/exists", headers=headers if headers else None, @@ -142,7 +142,7 @@ def _build_for_get_collection( headers = {} return self.api_client.request( - type_=m.InlineResponse2005, + type_=m.InlineResponse2004, method="GET", url="/collections/{collection_name}", headers=headers if headers else None, @@ -157,7 +157,7 @@ def _build_for_get_collections( """ headers = {} return self.api_client.request( - type_=m.InlineResponse2004, + type_=m.InlineResponse2003, method="GET", url="/collections", headers=headers if headers else None, @@ -199,7 +199,7 @@ class AsyncCollectionsApi(_CollectionsApi): async def collection_exists( self, collection_name: str, - ) -> m.InlineResponse2007: + ) -> m.InlineResponse2006: """ Returns \"true\" if the given collection name exists, and \"false\" otherwise """ @@ -238,7 +238,7 @@ async def delete_collection( async def get_collection( self, collection_name: str, - ) -> m.InlineResponse2005: + ) -> m.InlineResponse2004: """ Get detailed information about specified existing collection """ @@ -248,7 +248,7 @@ async def get_collection( async def get_collections( self, - ) -> m.InlineResponse2004: + ) -> m.InlineResponse2003: """ Get list name of all existing collections """ @@ -274,7 +274,7 @@ class SyncCollectionsApi(_CollectionsApi): def collection_exists( self, collection_name: str, - ) -> m.InlineResponse2007: + ) -> m.InlineResponse2006: """ Returns \"true\" if the given collection name exists, and \"false\" otherwise """ @@ -313,7 +313,7 @@ def delete_collection( def get_collection( self, collection_name: str, - ) -> m.InlineResponse2005: + ) -> m.InlineResponse2004: """ Get detailed information about specified existing collection """ @@ -323,7 +323,7 @@ def get_collection( def get_collections( self, - ) -> m.InlineResponse2004: + ) -> m.InlineResponse2003: """ Get list name of all existing collections """ diff --git a/qdrant_client/http/api/distributed_api.py b/qdrant_client/http/api/distributed_api.py index f7a360f5..d3fbe217 100644 --- a/qdrant_client/http/api/distributed_api.py +++ b/qdrant_client/http/api/distributed_api.py @@ -60,7 +60,7 @@ def _build_for_cluster_status( """ headers = {} return self.api_client.request( - type_=m.InlineResponse2003, + type_=m.InlineResponse2002, method="GET", 
url="/cluster", headers=headers if headers else None, @@ -79,7 +79,7 @@ def _build_for_collection_cluster_info( headers = {} return self.api_client.request( - type_=m.InlineResponse2008, + type_=m.InlineResponse2007, method="GET", url="/collections/{collection_name}/cluster", headers=headers if headers else None, @@ -156,6 +156,7 @@ def _build_for_recover_current_peer( def _build_for_remove_peer( self, peer_id: int, + timeout: int = None, force: bool = None, ): """ @@ -166,6 +167,8 @@ def _build_for_remove_peer( } query_params = {} + if timeout is not None: + query_params["timeout"] = str(timeout) if force is not None: query_params["force"] = str(force).lower() @@ -211,7 +214,7 @@ def _build_for_update_collection_cluster( class AsyncDistributedApi(_DistributedApi): async def cluster_status( self, - ) -> m.InlineResponse2003: + ) -> m.InlineResponse2002: """ Get information about the current state and composition of the cluster """ @@ -220,7 +223,7 @@ async def cluster_status( async def collection_cluster_info( self, collection_name: str, - ) -> m.InlineResponse2008: + ) -> m.InlineResponse2007: """ Get cluster information for a collection """ @@ -260,6 +263,7 @@ async def recover_current_peer( async def remove_peer( self, peer_id: int, + timeout: int = None, force: bool = None, ) -> m.InlineResponse200: """ @@ -267,6 +271,7 @@ async def remove_peer( """ return await self._build_for_remove_peer( peer_id=peer_id, + timeout=timeout, force=force, ) @@ -286,7 +291,7 @@ async def update_collection_cluster( class SyncDistributedApi(_DistributedApi): def cluster_status( self, - ) -> m.InlineResponse2003: + ) -> m.InlineResponse2002: """ Get information about the current state and composition of the cluster """ @@ -295,7 +300,7 @@ def cluster_status( def collection_cluster_info( self, collection_name: str, - ) -> m.InlineResponse2008: + ) -> m.InlineResponse2007: """ Get cluster information for a collection """ @@ -335,6 +340,7 @@ def recover_current_peer( def remove_peer( self, peer_id: int, + timeout: int = None, force: bool = None, ) -> m.InlineResponse200: """ @@ -342,6 +348,7 @@ def remove_peer( """ return self._build_for_remove_peer( peer_id=peer_id, + timeout=timeout, force=force, ) diff --git a/qdrant_client/http/api/indexes_api.py b/qdrant_client/http/api/indexes_api.py index 5bc4e9de..22822e22 100644 --- a/qdrant_client/http/api/indexes_api.py +++ b/qdrant_client/http/api/indexes_api.py @@ -77,7 +77,7 @@ def _build_for_create_field_index( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse2006, + type_=m.InlineResponse2005, method="PUT", url="/collections/{collection_name}/index", headers=headers if headers else None, @@ -109,7 +109,7 @@ def _build_for_delete_field_index( headers = {} return self.api_client.request( - type_=m.InlineResponse2006, + type_=m.InlineResponse2005, method="DELETE", url="/collections/{collection_name}/index/{field_name}", headers=headers if headers else None, @@ -125,7 +125,7 @@ async def create_field_index( wait: bool = None, ordering: WriteOrdering = None, create_field_index: m.CreateFieldIndex = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Create index for field in collection """ @@ -142,7 +142,7 @@ async def delete_field_index( field_name: str, wait: bool = None, ordering: WriteOrdering = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Delete field index for collection """ @@ -161,7 +161,7 @@ def create_field_index( wait: bool = None, 
ordering: WriteOrdering = None, create_field_index: m.CreateFieldIndex = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Create index for field in collection """ @@ -178,7 +178,7 @@ def delete_field_index( field_name: str, wait: bool = None, ordering: WriteOrdering = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Delete field index for collection """ diff --git a/qdrant_client/http/api/points_api.py b/qdrant_client/http/api/points_api.py index df1cee4a..77e49990 100644 --- a/qdrant_client/http/api/points_api.py +++ b/qdrant_client/http/api/points_api.py @@ -77,7 +77,7 @@ def _build_for_batch_update( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20015, + type_=m.InlineResponse20014, method="POST", url="/collections/{collection_name}/points/batch", headers=headers if headers else None, @@ -111,7 +111,7 @@ def _build_for_clear_payload( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse2006, + type_=m.InlineResponse2005, method="POST", url="/collections/{collection_name}/points/payload/clear", headers=headers if headers else None, @@ -123,6 +123,7 @@ def _build_for_clear_payload( def _build_for_count_points( self, collection_name: str, + consistency: m.ReadConsistency = None, timeout: int = None, count_request: m.CountRequest = None, ): @@ -134,6 +135,8 @@ def _build_for_count_points( } query_params = {} + if consistency is not None: + query_params["consistency"] = str(consistency) if timeout is not None: query_params["timeout"] = str(timeout) @@ -142,7 +145,7 @@ def _build_for_count_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20020, + type_=m.InlineResponse20019, method="POST", url="/collections/{collection_name}/points/count", headers=headers if headers else None, @@ -176,7 +179,7 @@ def _build_for_delete_payload( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse2006, + type_=m.InlineResponse2005, method="POST", url="/collections/{collection_name}/points/payload/delete", headers=headers if headers else None, @@ -210,7 +213,7 @@ def _build_for_delete_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse2006, + type_=m.InlineResponse2005, method="POST", url="/collections/{collection_name}/points/delete", headers=headers if headers else None, @@ -244,7 +247,7 @@ def _build_for_delete_vectors( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse2006, + type_=m.InlineResponse2005, method="POST", url="/collections/{collection_name}/points/vectors/delete", headers=headers if headers else None, @@ -256,8 +259,8 @@ def _build_for_delete_vectors( def _build_for_facet( self, collection_name: str, - timeout: int = None, consistency: m.ReadConsistency = None, + timeout: int = None, facet_request: m.FacetRequest = None, ): """ @@ -268,17 +271,17 @@ def _build_for_facet( } query_params = {} - if timeout is not None: - query_params["timeout"] = str(timeout) if consistency is not None: query_params["consistency"] = str(consistency) + if timeout is not None: + query_params["timeout"] = str(timeout) headers = {} body = 
jsonable_encoder(facet_request) if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20021, + type_=m.InlineResponse20020, method="POST", url="/collections/{collection_name}/facet", headers=headers if headers else None, @@ -307,7 +310,7 @@ def _build_for_get_point( headers = {} return self.api_client.request( - type_=m.InlineResponse20013, + type_=m.InlineResponse20012, method="GET", url="/collections/{collection_name}/points/{id}", headers=headers if headers else None, @@ -340,7 +343,7 @@ def _build_for_get_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20014, + type_=m.InlineResponse20013, method="POST", url="/collections/{collection_name}/points", headers=headers if headers else None, @@ -374,7 +377,7 @@ def _build_for_overwrite_payload( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse2006, + type_=m.InlineResponse2005, method="PUT", url="/collections/{collection_name}/points/payload", headers=headers if headers else None, @@ -408,7 +411,7 @@ def _build_for_scroll_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20016, + type_=m.InlineResponse20015, method="POST", url="/collections/{collection_name}/points/scroll", headers=headers if headers else None, @@ -442,7 +445,7 @@ def _build_for_set_payload( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse2006, + type_=m.InlineResponse2005, method="POST", url="/collections/{collection_name}/points/payload", headers=headers if headers else None, @@ -476,7 +479,7 @@ def _build_for_update_vectors( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse2006, + type_=m.InlineResponse2005, method="PUT", url="/collections/{collection_name}/points/vectors", headers=headers if headers else None, @@ -510,7 +513,7 @@ def _build_for_upsert_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse2006, + type_=m.InlineResponse2005, method="PUT", url="/collections/{collection_name}/points", headers=headers if headers else None, @@ -527,7 +530,7 @@ async def batch_update( wait: bool = None, ordering: WriteOrdering = None, update_operations: m.UpdateOperations = None, - ) -> m.InlineResponse20015: + ) -> m.InlineResponse20014: """ Apply a series of update operations for points, vectors and payloads """ @@ -544,7 +547,7 @@ async def clear_payload( wait: bool = None, ordering: WriteOrdering = None, points_selector: m.PointsSelector = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Remove all payload for specified points """ @@ -558,14 +561,16 @@ async def clear_payload( async def count_points( self, collection_name: str, + consistency: m.ReadConsistency = None, timeout: int = None, count_request: m.CountRequest = None, - ) -> m.InlineResponse20020: + ) -> m.InlineResponse20019: """ Count points which matches given filtering condition """ return await self._build_for_count_points( collection_name=collection_name, + consistency=consistency, timeout=timeout, count_request=count_request, ) @@ -576,7 +581,7 @@ async def delete_payload( wait: bool = 
None, ordering: WriteOrdering = None, delete_payload: m.DeletePayload = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Delete specified key payload for points """ @@ -593,7 +598,7 @@ async def delete_points( wait: bool = None, ordering: WriteOrdering = None, points_selector: m.PointsSelector = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Delete points """ @@ -610,7 +615,7 @@ async def delete_vectors( wait: bool = None, ordering: WriteOrdering = None, delete_vectors: m.DeleteVectors = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Delete named vectors from the given points. """ @@ -624,17 +629,17 @@ async def delete_vectors( async def facet( self, collection_name: str, - timeout: int = None, consistency: m.ReadConsistency = None, + timeout: int = None, facet_request: m.FacetRequest = None, - ) -> m.InlineResponse20021: + ) -> m.InlineResponse20020: """ Count points that satisfy the given filter for each unique value of a payload key. """ return await self._build_for_facet( collection_name=collection_name, - timeout=timeout, consistency=consistency, + timeout=timeout, facet_request=facet_request, ) @@ -643,7 +648,7 @@ async def get_point( collection_name: str, id: m.ExtendedPointId, consistency: m.ReadConsistency = None, - ) -> m.InlineResponse20013: + ) -> m.InlineResponse20012: """ Retrieve full information of single point by id """ @@ -659,7 +664,7 @@ async def get_points( consistency: m.ReadConsistency = None, timeout: int = None, point_request: m.PointRequest = None, - ) -> m.InlineResponse20014: + ) -> m.InlineResponse20013: """ Retrieve multiple points by specified IDs """ @@ -676,7 +681,7 @@ async def overwrite_payload( wait: bool = None, ordering: WriteOrdering = None, set_payload: m.SetPayload = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Replace full payload of points with new one """ @@ -693,7 +698,7 @@ async def scroll_points( consistency: m.ReadConsistency = None, timeout: int = None, scroll_request: m.ScrollRequest = None, - ) -> m.InlineResponse20016: + ) -> m.InlineResponse20015: """ Scroll request - paginate over all points which matches given filtering condition """ @@ -710,7 +715,7 @@ async def set_payload( wait: bool = None, ordering: WriteOrdering = None, set_payload: m.SetPayload = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Set payload values for points """ @@ -727,7 +732,7 @@ async def update_vectors( wait: bool = None, ordering: WriteOrdering = None, update_vectors: m.UpdateVectors = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Update specified named vectors on points, keep unspecified vectors intact. """ @@ -744,7 +749,7 @@ async def upsert_points( wait: bool = None, ordering: WriteOrdering = None, point_insert_operations: m.PointInsertOperations = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Perform insert + updates on points. If point with given ID already exists - it will be overwritten. 
""" @@ -763,7 +768,7 @@ def batch_update( wait: bool = None, ordering: WriteOrdering = None, update_operations: m.UpdateOperations = None, - ) -> m.InlineResponse20015: + ) -> m.InlineResponse20014: """ Apply a series of update operations for points, vectors and payloads """ @@ -780,7 +785,7 @@ def clear_payload( wait: bool = None, ordering: WriteOrdering = None, points_selector: m.PointsSelector = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Remove all payload for specified points """ @@ -794,14 +799,16 @@ def clear_payload( def count_points( self, collection_name: str, + consistency: m.ReadConsistency = None, timeout: int = None, count_request: m.CountRequest = None, - ) -> m.InlineResponse20020: + ) -> m.InlineResponse20019: """ Count points which matches given filtering condition """ return self._build_for_count_points( collection_name=collection_name, + consistency=consistency, timeout=timeout, count_request=count_request, ) @@ -812,7 +819,7 @@ def delete_payload( wait: bool = None, ordering: WriteOrdering = None, delete_payload: m.DeletePayload = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Delete specified key payload for points """ @@ -829,7 +836,7 @@ def delete_points( wait: bool = None, ordering: WriteOrdering = None, points_selector: m.PointsSelector = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Delete points """ @@ -846,7 +853,7 @@ def delete_vectors( wait: bool = None, ordering: WriteOrdering = None, delete_vectors: m.DeleteVectors = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Delete named vectors from the given points. """ @@ -860,17 +867,17 @@ def delete_vectors( def facet( self, collection_name: str, - timeout: int = None, consistency: m.ReadConsistency = None, + timeout: int = None, facet_request: m.FacetRequest = None, - ) -> m.InlineResponse20021: + ) -> m.InlineResponse20020: """ Count points that satisfy the given filter for each unique value of a payload key. 
""" return self._build_for_facet( collection_name=collection_name, - timeout=timeout, consistency=consistency, + timeout=timeout, facet_request=facet_request, ) @@ -879,7 +886,7 @@ def get_point( collection_name: str, id: m.ExtendedPointId, consistency: m.ReadConsistency = None, - ) -> m.InlineResponse20013: + ) -> m.InlineResponse20012: """ Retrieve full information of single point by id """ @@ -895,7 +902,7 @@ def get_points( consistency: m.ReadConsistency = None, timeout: int = None, point_request: m.PointRequest = None, - ) -> m.InlineResponse20014: + ) -> m.InlineResponse20013: """ Retrieve multiple points by specified IDs """ @@ -912,7 +919,7 @@ def overwrite_payload( wait: bool = None, ordering: WriteOrdering = None, set_payload: m.SetPayload = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Replace full payload of points with new one """ @@ -929,7 +936,7 @@ def scroll_points( consistency: m.ReadConsistency = None, timeout: int = None, scroll_request: m.ScrollRequest = None, - ) -> m.InlineResponse20016: + ) -> m.InlineResponse20015: """ Scroll request - paginate over all points which matches given filtering condition """ @@ -946,7 +953,7 @@ def set_payload( wait: bool = None, ordering: WriteOrdering = None, set_payload: m.SetPayload = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Set payload values for points """ @@ -963,7 +970,7 @@ def update_vectors( wait: bool = None, ordering: WriteOrdering = None, update_vectors: m.UpdateVectors = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Update specified named vectors on points, keep unspecified vectors intact. """ @@ -980,7 +987,7 @@ def upsert_points( wait: bool = None, ordering: WriteOrdering = None, point_insert_operations: m.PointInsertOperations = None, - ) -> m.InlineResponse2006: + ) -> m.InlineResponse2005: """ Perform insert + updates on points. If point with given ID already exists - it will be overwritten. """ diff --git a/qdrant_client/http/api/search_api.py b/qdrant_client/http/api/search_api.py index 35e1fb31..9addb279 100644 --- a/qdrant_client/http/api/search_api.py +++ b/qdrant_client/http/api/search_api.py @@ -77,7 +77,7 @@ def _build_for_discover_batch_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20018, + type_=m.InlineResponse20017, method="POST", url="/collections/{collection_name}/points/discover/batch", headers=headers if headers else None, @@ -94,7 +94,7 @@ def _build_for_discover_points( discover_request: m.DiscoverRequest = None, ): """ - Use context and a target to find the most similar points to the target, constrained by the context. When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair. Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. When using target (with or without context), the score behaves a little different: The integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. 
The context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise. + Use context and a target to find the most similar points to the target, constrained by the context. When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair. Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. When using target (with or without context), the score behaves a little different: The integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. The context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise. """ path_params = { "collection_name": str(collection_name), @@ -111,7 +111,7 @@ def _build_for_discover_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20017, + type_=m.InlineResponse20016, method="POST", url="/collections/{collection_name}/points/discover", headers=headers if headers else None, @@ -145,7 +145,7 @@ def _build_for_query_batch_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20023, + type_=m.InlineResponse20022, method="POST", url="/collections/{collection_name}/points/query/batch", headers=headers if headers else None, @@ -179,7 +179,7 @@ def _build_for_query_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20022, + type_=m.InlineResponse20021, method="POST", url="/collections/{collection_name}/points/query", headers=headers if headers else None, @@ -213,7 +213,7 @@ def _build_for_query_points_groups( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20019, + type_=m.InlineResponse20018, method="POST", url="/collections/{collection_name}/points/query/groups", headers=headers if headers else None, @@ -247,7 +247,7 @@ def _build_for_recommend_batch_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20018, + type_=m.InlineResponse20017, method="POST", url="/collections/{collection_name}/points/recommend/batch", headers=headers if headers else None, @@ -281,7 +281,7 @@ def _build_for_recommend_point_groups( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20019, + type_=m.InlineResponse20018, method="POST", url="/collections/{collection_name}/points/recommend/groups", headers=headers if headers else None, @@ -315,7 +315,7 @@ def _build_for_recommend_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20017, + type_=m.InlineResponse20016, method="POST", url="/collections/{collection_name}/points/recommend", headers=headers if 
headers else None, @@ -349,7 +349,7 @@ def _build_for_search_batch_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20018, + type_=m.InlineResponse20017, method="POST", url="/collections/{collection_name}/points/search/batch", headers=headers if headers else None, @@ -383,7 +383,7 @@ def _build_for_search_matrix_offsets( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20025, + type_=m.InlineResponse20024, method="POST", url="/collections/{collection_name}/points/search/matrix/offsets", headers=headers if headers else None, @@ -417,7 +417,7 @@ def _build_for_search_matrix_pairs( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20024, + type_=m.InlineResponse20023, method="POST", url="/collections/{collection_name}/points/search/matrix/pairs", headers=headers if headers else None, @@ -451,7 +451,7 @@ def _build_for_search_point_groups( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20019, + type_=m.InlineResponse20018, method="POST", url="/collections/{collection_name}/points/search/groups", headers=headers if headers else None, @@ -485,7 +485,7 @@ def _build_for_search_points( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20017, + type_=m.InlineResponse20016, method="POST", url="/collections/{collection_name}/points/search", headers=headers if headers else None, @@ -502,7 +502,7 @@ async def discover_batch_points( consistency: m.ReadConsistency = None, timeout: int = None, discover_request_batch: m.DiscoverRequestBatch = None, - ) -> m.InlineResponse20018: + ) -> m.InlineResponse20017: """ Look for points based on target and/or positive and negative example pairs, in batch. """ @@ -519,9 +519,9 @@ async def discover_points( consistency: m.ReadConsistency = None, timeout: int = None, discover_request: m.DiscoverRequest = None, - ) -> m.InlineResponse20017: + ) -> m.InlineResponse20016: """ - Use context and a target to find the most similar points to the target, constrained by the context. When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair. Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. When using target (with or without context), the score behaves a little different: The integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. The context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise. + Use context and a target to find the most similar points to the target, constrained by the context. 
When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair. Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. When using target (with or without context), the score behaves a little different: The integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. The context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise. """ return await self._build_for_discover_points( collection_name=collection_name, @@ -536,7 +536,7 @@ async def query_batch_points( consistency: m.ReadConsistency = None, timeout: int = None, query_request_batch: m.QueryRequestBatch = None, - ) -> m.InlineResponse20023: + ) -> m.InlineResponse20022: """ Universally query points in batch. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. """ @@ -553,7 +553,7 @@ async def query_points( consistency: m.ReadConsistency = None, timeout: int = None, query_request: m.QueryRequest = None, - ) -> m.InlineResponse20022: + ) -> m.InlineResponse20021: """ Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. """ @@ -570,7 +570,7 @@ async def query_points_groups( consistency: m.ReadConsistency = None, timeout: int = None, query_groups_request: m.QueryGroupsRequest = None, - ) -> m.InlineResponse20019: + ) -> m.InlineResponse20018: """ Universally query points, grouped by a given payload field """ @@ -587,7 +587,7 @@ async def recommend_batch_points( consistency: m.ReadConsistency = None, timeout: int = None, recommend_request_batch: m.RecommendRequestBatch = None, - ) -> m.InlineResponse20018: + ) -> m.InlineResponse20017: """ Look for the points which are closer to stored positive examples and at the same time further to negative examples. """ @@ -604,7 +604,7 @@ async def recommend_point_groups( consistency: m.ReadConsistency = None, timeout: int = None, recommend_groups_request: m.RecommendGroupsRequest = None, - ) -> m.InlineResponse20019: + ) -> m.InlineResponse20018: """ Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given payload field. """ @@ -621,7 +621,7 @@ async def recommend_points( consistency: m.ReadConsistency = None, timeout: int = None, recommend_request: m.RecommendRequest = None, - ) -> m.InlineResponse20017: + ) -> m.InlineResponse20016: """ Look for the points which are closer to stored positive examples and at the same time further to negative examples. 
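# NOTE (annotation, not part of the generated code): a worked reading of the
# discover score described in the docstrings above, with illustrative numbers:
#   score = 2.83 in a target + context search means
#     integer part  2  -> rank with respect to the context: the point was
#                         closer to the positive side of two more pairs than
#                         to the negative side (+1/-1 per context pair);
#     decimal part .83 -> derived from the point's distance to the target.
#   With context only (no target), the score relates to a loss whose maximum
#   is 0.0, so many points scoring exactly 0.0 is expected, not a bug.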
""" @@ -638,7 +638,7 @@ async def search_batch_points( consistency: m.ReadConsistency = None, timeout: int = None, search_request_batch: m.SearchRequestBatch = None, - ) -> m.InlineResponse20018: + ) -> m.InlineResponse20017: """ Retrieve by batch the closest points based on vector similarity and given filtering conditions """ @@ -655,7 +655,7 @@ async def search_matrix_offsets( consistency: m.ReadConsistency = None, timeout: int = None, search_matrix_request: m.SearchMatrixRequest = None, - ) -> m.InlineResponse20025: + ) -> m.InlineResponse20024: """ Compute distance matrix for sampled points with an offset based output format """ @@ -672,7 +672,7 @@ async def search_matrix_pairs( consistency: m.ReadConsistency = None, timeout: int = None, search_matrix_request: m.SearchMatrixRequest = None, - ) -> m.InlineResponse20024: + ) -> m.InlineResponse20023: """ Compute distance matrix for sampled points with a pair based output format """ @@ -689,7 +689,7 @@ async def search_point_groups( consistency: m.ReadConsistency = None, timeout: int = None, search_groups_request: m.SearchGroupsRequest = None, - ) -> m.InlineResponse20019: + ) -> m.InlineResponse20018: """ Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given payload field """ @@ -706,7 +706,7 @@ async def search_points( consistency: m.ReadConsistency = None, timeout: int = None, search_request: m.SearchRequest = None, - ) -> m.InlineResponse20017: + ) -> m.InlineResponse20016: """ Retrieve closest points based on vector similarity and given filtering conditions """ @@ -725,7 +725,7 @@ def discover_batch_points( consistency: m.ReadConsistency = None, timeout: int = None, discover_request_batch: m.DiscoverRequestBatch = None, - ) -> m.InlineResponse20018: + ) -> m.InlineResponse20017: """ Look for points based on target and/or positive and negative example pairs, in batch. """ @@ -742,9 +742,9 @@ def discover_points( consistency: m.ReadConsistency = None, timeout: int = None, discover_request: m.DiscoverRequest = None, - ) -> m.InlineResponse20017: + ) -> m.InlineResponse20016: """ - Use context and a target to find the most similar points to the target, constrained by the context. When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair. Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. When using target (with or without context), the score behaves a little different: The integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. The context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise. + Use context and a target to find the most similar points to the target, constrained by the context. When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. 
This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair. Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. When using target (with or without context), the score behaves a little different: The integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. The context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise. """ return self._build_for_discover_points( collection_name=collection_name, @@ -759,7 +759,7 @@ def query_batch_points( consistency: m.ReadConsistency = None, timeout: int = None, query_request_batch: m.QueryRequestBatch = None, - ) -> m.InlineResponse20023: + ) -> m.InlineResponse20022: """ Universally query points in batch. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. """ @@ -776,7 +776,7 @@ def query_points( consistency: m.ReadConsistency = None, timeout: int = None, query_request: m.QueryRequest = None, - ) -> m.InlineResponse20022: + ) -> m.InlineResponse20021: """ Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries. """ @@ -793,7 +793,7 @@ def query_points_groups( consistency: m.ReadConsistency = None, timeout: int = None, query_groups_request: m.QueryGroupsRequest = None, - ) -> m.InlineResponse20019: + ) -> m.InlineResponse20018: """ Universally query points, grouped by a given payload field """ @@ -810,7 +810,7 @@ def recommend_batch_points( consistency: m.ReadConsistency = None, timeout: int = None, recommend_request_batch: m.RecommendRequestBatch = None, - ) -> m.InlineResponse20018: + ) -> m.InlineResponse20017: """ Look for the points which are closer to stored positive examples and at the same time further to negative examples. """ @@ -827,7 +827,7 @@ def recommend_point_groups( consistency: m.ReadConsistency = None, timeout: int = None, recommend_groups_request: m.RecommendGroupsRequest = None, - ) -> m.InlineResponse20019: + ) -> m.InlineResponse20018: """ Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given payload field. """ @@ -844,7 +844,7 @@ def recommend_points( consistency: m.ReadConsistency = None, timeout: int = None, recommend_request: m.RecommendRequest = None, - ) -> m.InlineResponse20017: + ) -> m.InlineResponse20016: """ Look for the points which are closer to stored positive examples and at the same time further to negative examples. 
""" @@ -861,7 +861,7 @@ def search_batch_points( consistency: m.ReadConsistency = None, timeout: int = None, search_request_batch: m.SearchRequestBatch = None, - ) -> m.InlineResponse20018: + ) -> m.InlineResponse20017: """ Retrieve by batch the closest points based on vector similarity and given filtering conditions """ @@ -878,7 +878,7 @@ def search_matrix_offsets( consistency: m.ReadConsistency = None, timeout: int = None, search_matrix_request: m.SearchMatrixRequest = None, - ) -> m.InlineResponse20025: + ) -> m.InlineResponse20024: """ Compute distance matrix for sampled points with an offset based output format """ @@ -895,7 +895,7 @@ def search_matrix_pairs( consistency: m.ReadConsistency = None, timeout: int = None, search_matrix_request: m.SearchMatrixRequest = None, - ) -> m.InlineResponse20024: + ) -> m.InlineResponse20023: """ Compute distance matrix for sampled points with a pair based output format """ @@ -912,7 +912,7 @@ def search_point_groups( consistency: m.ReadConsistency = None, timeout: int = None, search_groups_request: m.SearchGroupsRequest = None, - ) -> m.InlineResponse20019: + ) -> m.InlineResponse20018: """ Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given payload field """ @@ -929,7 +929,7 @@ def search_points( consistency: m.ReadConsistency = None, timeout: int = None, search_request: m.SearchRequest = None, - ) -> m.InlineResponse20017: + ) -> m.InlineResponse20016: """ Retrieve closest points based on vector similarity and given filtering conditions """ diff --git a/qdrant_client/http/api/service_api.py b/qdrant_client/http/api/service_api.py index e6942aa1..05485b24 100644 --- a/qdrant_client/http/api/service_api.py +++ b/qdrant_client/http/api/service_api.py @@ -52,20 +52,6 @@ class _ServiceApi: def __init__(self, api_client: "Union[ApiClient, AsyncApiClient]"): self.api_client = api_client - def _build_for_get_locks( - self, - ): - """ - Get lock options. If write is locked, all write operations and collection creation are forbidden - """ - headers = {} - return self.api_client.request( - type_=m.InlineResponse2002, - method="GET", - url="/locks", - headers=headers if headers else None, - ) - def _build_for_healthz( self, ): @@ -114,25 +100,6 @@ def _build_for_metrics( params=query_params, ) - def _build_for_post_locks( - self, - locks_option: m.LocksOption = None, - ): - """ - Set lock options. If write is locked, all write operations and collection creation are forbidden. Returns previous lock options - """ - headers = {} - body = jsonable_encoder(locks_option) - if "Content-Type" not in headers: - headers["Content-Type"] = "application/json" - return self.api_client.request( - type_=m.InlineResponse2002, - method="POST", - url="/locks", - headers=headers if headers else None, - content=body, - ) - def _build_for_readyz( self, ): @@ -186,14 +153,6 @@ def _build_for_telemetry( class AsyncServiceApi(_ServiceApi): - async def get_locks( - self, - ) -> m.InlineResponse2002: - """ - Get lock options. If write is locked, all write operations and collection creation are forbidden - """ - return await self._build_for_get_locks() - async def healthz( self, ) -> str: @@ -221,17 +180,6 @@ async def metrics( anonymize=anonymize, ) - async def post_locks( - self, - locks_option: m.LocksOption = None, - ) -> m.InlineResponse2002: - """ - Set lock options. If write is locked, all write operations and collection creation are forbidden. 
Returns previous lock options - """ - return await self._build_for_post_locks( - locks_option=locks_option, - ) - async def readyz( self, ) -> str: @@ -263,14 +211,6 @@ async def telemetry( class SyncServiceApi(_ServiceApi): - def get_locks( - self, - ) -> m.InlineResponse2002: - """ - Get lock options. If write is locked, all write operations and collection creation are forbidden - """ - return self._build_for_get_locks() - def healthz( self, ) -> str: @@ -298,17 +238,6 @@ def metrics( anonymize=anonymize, ) - def post_locks( - self, - locks_option: m.LocksOption = None, - ) -> m.InlineResponse2002: - """ - Set lock options. If write is locked, all write operations and collection creation are forbidden. Returns previous lock options - """ - return self._build_for_post_locks( - locks_option=locks_option, - ) - def readyz( self, ) -> str: diff --git a/qdrant_client/http/api/snapshots_api.py b/qdrant_client/http/api/snapshots_api.py index 460edbba..16ae5e2b 100644 --- a/qdrant_client/http/api/snapshots_api.py +++ b/qdrant_client/http/api/snapshots_api.py @@ -65,7 +65,7 @@ def _build_for_create_full_snapshot( headers = {} return self.api_client.request( - type_=m.InlineResponse20012, + type_=m.InlineResponse20011, method="POST", url="/snapshots", headers=headers if headers else None, @@ -92,7 +92,7 @@ def _build_for_create_shard_snapshot( headers = {} return self.api_client.request( - type_=m.InlineResponse20012, + type_=m.InlineResponse20011, method="POST", url="/collections/{collection_name}/shards/{shard_id}/snapshots", headers=headers if headers else None, @@ -118,7 +118,7 @@ def _build_for_create_snapshot( headers = {} return self.api_client.request( - type_=m.InlineResponse20012, + type_=m.InlineResponse20011, method="POST", url="/collections/{collection_name}/snapshots", headers=headers if headers else None, @@ -144,7 +144,7 @@ def _build_for_delete_full_snapshot( headers = {} return self.api_client.request( - type_=m.InlineResponse20010, + type_=m.InlineResponse2009, method="DELETE", url="/snapshots/{snapshot_name}", headers=headers if headers else None, @@ -174,7 +174,7 @@ def _build_for_delete_shard_snapshot( headers = {} return self.api_client.request( - type_=m.InlineResponse20010, + type_=m.InlineResponse2009, method="DELETE", url="/collections/{collection_name}/shards/{shard_id}/snapshots/{snapshot_name}", headers=headers if headers else None, @@ -202,7 +202,7 @@ def _build_for_delete_snapshot( headers = {} return self.api_client.request( - type_=m.InlineResponse20010, + type_=m.InlineResponse2009, method="DELETE", url="/collections/{collection_name}/snapshots/{snapshot_name}", headers=headers if headers else None, @@ -284,7 +284,7 @@ def _build_for_list_full_snapshots( """ headers = {} return self.api_client.request( - type_=m.InlineResponse20011, + type_=m.InlineResponse20010, method="GET", url="/snapshots", headers=headers if headers else None, @@ -305,7 +305,7 @@ def _build_for_list_shard_snapshots( headers = {} return self.api_client.request( - type_=m.InlineResponse20011, + type_=m.InlineResponse20010, method="GET", url="/collections/{collection_name}/shards/{shard_id}/snapshots", headers=headers if headers else None, @@ -325,7 +325,7 @@ def _build_for_list_snapshots( headers = {} return self.api_client.request( - type_=m.InlineResponse20011, + type_=m.InlineResponse20010, method="GET", url="/collections/{collection_name}/snapshots", headers=headers if headers else None, @@ -354,7 +354,7 @@ def _build_for_recover_from_snapshot( if "Content-Type" not in headers: 
headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20010, + type_=m.InlineResponse2009, method="PUT", url="/collections/{collection_name}/snapshots/recover", headers=headers if headers else None, @@ -393,7 +393,7 @@ def _build_for_recover_from_uploaded_snapshot( files["snapshot"] = snapshot return self.api_client.request( - type_=m.InlineResponse20010, + type_=m.InlineResponse2009, method="POST", url="/collections/{collection_name}/snapshots/upload", headers=headers if headers else None, @@ -427,7 +427,7 @@ def _build_for_recover_shard_from_snapshot( if "Content-Type" not in headers: headers["Content-Type"] = "application/json" return self.api_client.request( - type_=m.InlineResponse20010, + type_=m.InlineResponse2009, method="PUT", url="/collections/{collection_name}/shards/{shard_id}/snapshots/recover", headers=headers if headers else None, @@ -468,7 +468,7 @@ def _build_for_recover_shard_from_uploaded_snapshot( files["snapshot"] = snapshot return self.api_client.request( - type_=m.InlineResponse20010, + type_=m.InlineResponse2009, method="POST", url="/collections/{collection_name}/shards/{shard_id}/snapshots/upload", headers=headers if headers else None, @@ -483,7 +483,7 @@ class AsyncSnapshotsApi(_SnapshotsApi): async def create_full_snapshot( self, wait: bool = None, - ) -> m.InlineResponse20012: + ) -> m.InlineResponse20011: """ Create new snapshot of the whole storage """ @@ -496,7 +496,7 @@ async def create_shard_snapshot( collection_name: str, shard_id: int, wait: bool = None, - ) -> m.InlineResponse20012: + ) -> m.InlineResponse20011: """ Create new snapshot of a shard for a collection """ @@ -510,7 +510,7 @@ async def create_snapshot( self, collection_name: str, wait: bool = None, - ) -> m.InlineResponse20012: + ) -> m.InlineResponse20011: """ Create new snapshot for a collection """ @@ -523,7 +523,7 @@ async def delete_full_snapshot( self, snapshot_name: str, wait: bool = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Delete snapshot of the whole storage """ @@ -538,7 +538,7 @@ async def delete_shard_snapshot( shard_id: int, snapshot_name: str, wait: bool = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Delete snapshot of a shard for a collection """ @@ -554,7 +554,7 @@ async def delete_snapshot( collection_name: str, snapshot_name: str, wait: bool = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Delete snapshot for a collection """ @@ -605,7 +605,7 @@ async def get_snapshot( async def list_full_snapshots( self, - ) -> m.InlineResponse20011: + ) -> m.InlineResponse20010: """ Get list of snapshots of the whole storage """ @@ -615,7 +615,7 @@ async def list_shard_snapshots( self, collection_name: str, shard_id: int, - ) -> m.InlineResponse20011: + ) -> m.InlineResponse20010: """ Get list of snapshots for a shard of a collection """ @@ -627,7 +627,7 @@ async def list_shard_snapshots( async def list_snapshots( self, collection_name: str, - ) -> m.InlineResponse20011: + ) -> m.InlineResponse20010: """ Get list of snapshots for a collection """ @@ -640,7 +640,7 @@ async def recover_from_snapshot( collection_name: str, wait: bool = None, snapshot_recover: m.SnapshotRecover = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Recover local collection data from a snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created. 
""" @@ -657,7 +657,7 @@ async def recover_from_uploaded_snapshot( priority: SnapshotPriority = None, checksum: str = None, snapshot: IO[Any] = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Recover local collection data from an uploaded snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created. """ @@ -675,7 +675,7 @@ async def recover_shard_from_snapshot( shard_id: int, wait: bool = None, shard_snapshot_recover: m.ShardSnapshotRecover = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Recover shard of a local collection data from a snapshot. This will overwrite any data, stored in this shard, for the collection. """ @@ -694,7 +694,7 @@ async def recover_shard_from_uploaded_snapshot( priority: SnapshotPriority = None, checksum: str = None, snapshot: IO[Any] = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Recover shard of a local collection from an uploaded snapshot. This will overwrite any data, stored on this node, for the collection shard. """ @@ -712,7 +712,7 @@ class SyncSnapshotsApi(_SnapshotsApi): def create_full_snapshot( self, wait: bool = None, - ) -> m.InlineResponse20012: + ) -> m.InlineResponse20011: """ Create new snapshot of the whole storage """ @@ -725,7 +725,7 @@ def create_shard_snapshot( collection_name: str, shard_id: int, wait: bool = None, - ) -> m.InlineResponse20012: + ) -> m.InlineResponse20011: """ Create new snapshot of a shard for a collection """ @@ -739,7 +739,7 @@ def create_snapshot( self, collection_name: str, wait: bool = None, - ) -> m.InlineResponse20012: + ) -> m.InlineResponse20011: """ Create new snapshot for a collection """ @@ -752,7 +752,7 @@ def delete_full_snapshot( self, snapshot_name: str, wait: bool = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Delete snapshot of the whole storage """ @@ -767,7 +767,7 @@ def delete_shard_snapshot( shard_id: int, snapshot_name: str, wait: bool = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Delete snapshot of a shard for a collection """ @@ -783,7 +783,7 @@ def delete_snapshot( collection_name: str, snapshot_name: str, wait: bool = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Delete snapshot for a collection """ @@ -834,7 +834,7 @@ def get_snapshot( def list_full_snapshots( self, - ) -> m.InlineResponse20011: + ) -> m.InlineResponse20010: """ Get list of snapshots of the whole storage """ @@ -844,7 +844,7 @@ def list_shard_snapshots( self, collection_name: str, shard_id: int, - ) -> m.InlineResponse20011: + ) -> m.InlineResponse20010: """ Get list of snapshots for a shard of a collection """ @@ -856,7 +856,7 @@ def list_shard_snapshots( def list_snapshots( self, collection_name: str, - ) -> m.InlineResponse20011: + ) -> m.InlineResponse20010: """ Get list of snapshots for a collection """ @@ -869,7 +869,7 @@ def recover_from_snapshot( collection_name: str, wait: bool = None, snapshot_recover: m.SnapshotRecover = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Recover local collection data from a snapshot. This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created. """ @@ -886,7 +886,7 @@ def recover_from_uploaded_snapshot( priority: SnapshotPriority = None, checksum: str = None, snapshot: IO[Any] = None, - ) -> m.InlineResponse20010: + ) -> m.InlineResponse2009: """ Recover local collection data from an uploaded snapshot. 
This will overwrite any data, stored on this node, for the collection. If collection does not exist - it will be created.
        """
@@ -904,7 +904,7 @@ def recover_shard_from_snapshot(
        shard_id: int,
        wait: bool = None,
        shard_snapshot_recover: m.ShardSnapshotRecover = None,
-    ) -> m.InlineResponse20010:
+    ) -> m.InlineResponse2009:
        """
        Recover shard of a local collection data from a snapshot. This will overwrite any data, stored in this shard, for the collection.
        """
@@ -923,7 +923,7 @@ def recover_shard_from_uploaded_snapshot(
        priority: SnapshotPriority = None,
        checksum: str = None,
        snapshot: IO[Any] = None,
-    ) -> m.InlineResponse20010:
+    ) -> m.InlineResponse2009:
        """
        Recover shard of a local collection from an uploaded snapshot. This will overwrite any data, stored on this node, for the collection shard.
        """
diff --git a/qdrant_client/http/models/models.py b/qdrant_client/http/models/models.py
index e0006bfc..7e42b0c0 100644
--- a/qdrant_client/http/models/models.py
+++ b/qdrant_client/http/models/models.py
@@ -33,6 +33,21 @@ class AbsExpression(BaseModel, extra="forbid"):
     abs: "Expression" = Field(..., description="")


+class AcornSearchParams(BaseModel, extra="forbid"):
+    """
+    ACORN-related search parameters
+    """
+
+    enable: Optional[bool] = Field(
+        default=False,
+        description="If true, then ACORN may be used for the HNSW search based on filter selectivity. Improves search recall for searches with multiple low-selectivity payload filters, at the cost of performance.",
+    )
+    max_selectivity: Optional[float] = Field(
+        default=None,
+        description="Maximum selectivity of filters to enable ACORN. If the estimated filter selectivity is higher than this value, ACORN will not be used. Selectivity is estimated as: `estimated number of points satisfying the filters / total number of points`. 0.0 for never, 1.0 for always. Default is 0.4.",
+    )
+
+
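The new `AcornSearchParams` plugs into `SearchParams.acorn` (added further down in this file). As a quick orientation, a minimal sketch of a filtered query that opts into ACORN; the client setup, collection name, vector, and filter values are illustrative, not part of this change:

    from qdrant_client import QdrantClient, models

    client = QdrantClient(url="http://localhost:6333")

    # ACORN is only considered when the estimated filter selectivity is
    # below max_selectivity (0.4 here, matching the documented default).
    hits = client.query_points(
        collection_name="my_collection",
        query=[0.2, 0.1, 0.9, 0.7],
        query_filter=models.Filter(
            must=[models.FieldCondition(key="color", match=models.MatchValue(value="red"))]
        ),
        search_params=models.SearchParams(
            acorn=models.AcornSearchParams(enable=True, max_selectivity=0.4)
        ),
        limit=10,
    )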
Default is `false`.", + ) + stopwords: Optional["StopwordsInterface"] = Field( + default=None, + description="Configuration of the stopwords filter. Supports list of pre-defined languages and custom stopwords. Default: initialized for specified `language` or English if not specified.", + ) + stemmer: Optional["StemmingAlgorithm"] = Field( + default=None, + description="Configuration of the stemmer. Processes tokens to their root form. Default: initialized Snowball stemmer for specified `language` or English if not specified.", + ) + min_token_len: Optional[int] = Field( + default=None, + description="Minimum token length to keep. If token is shorter than this, it will be discarded. Default is `None`, which means no minimum length.", + ) + max_token_len: Optional[int] = Field( + default=None, + description="Maximum token length to keep. If token is longer than this, it will be discarded. Default is `None`, which means no maximum length.", + ) + + class BoolIndexParams(BaseModel, extra="forbid"): type: "BoolIndexType" = Field(..., description="") on_disk: Optional[bool] = Field(default=None, description="If true, store the index on disk. Default: false.") @@ -160,6 +221,7 @@ class ClusterTelemetry(BaseModel): status: Optional["ClusterStatusTelemetry"] = Field(default=None, description="") config: Optional["ClusterConfigTelemetry"] = Field(default=None, description="") peers: Optional[Dict[str, "PeerInfo"]] = Field(default=None, description="") + peer_metadata: Optional[Dict[str, "PeerMetadata"]] = Field(default=None, description="") metadata: Optional[Dict[str, Any]] = Field(default=None, description="") @@ -193,6 +255,10 @@ class CollectionConfig(BaseModel): strict_mode_config: Optional["StrictModeConfigOutput"] = Field( default=None, description="Information about the collection configuration" ) + metadata: Optional["Payload"] = Field( + default=None, + description="Arbitrary JSON metadata for the collection This can be used to store application-specific information such as creation time, migration data, inference model info, etc.", + ) class CollectionConfigTelemetry(BaseModel): @@ -203,6 +269,7 @@ class CollectionConfigTelemetry(BaseModel): quantization_config: Optional["QuantizationConfig"] = Field(default=None, description="") strict_mode_config: Optional["StrictModeConfigOutput"] = Field(default=None, description="") uuid: Optional[UUID] = Field(default=None, description="") + metadata: Optional["Payload"] = Field(default=None, description="Arbitrary JSON metadata for the collection") class CollectionDescription(BaseModel): @@ -226,9 +293,8 @@ class CollectionInfo(BaseModel): optimizer_status: "OptimizersStatus" = Field( ..., description="Current statistics and configuration of the collection" ) - vectors_count: Optional[int] = Field( - default=None, - description="DEPRECATED: Approximate number of vectors in collection. All vectors in collection are available for querying. Calculated as `points_count x vectors_per_point`. 
 class BoolIndexParams(BaseModel, extra="forbid"):
     type: "BoolIndexType" = Field(..., description="")
     on_disk: Optional[bool] = Field(default=None, description="If true, store the index on disk. Default: false.")
@@ -160,6 +221,7 @@ class ClusterTelemetry(BaseModel):
     status: Optional["ClusterStatusTelemetry"] = Field(default=None, description="")
     config: Optional["ClusterConfigTelemetry"] = Field(default=None, description="")
     peers: Optional[Dict[str, "PeerInfo"]] = Field(default=None, description="")
+    peer_metadata: Optional[Dict[str, "PeerMetadata"]] = Field(default=None, description="")
     metadata: Optional[Dict[str, Any]] = Field(default=None, description="")


@@ -193,6 +255,10 @@ class CollectionConfig(BaseModel):
     strict_mode_config: Optional["StrictModeConfigOutput"] = Field(
         default=None, description="Information about the collection configuration"
     )
+    metadata: Optional["Payload"] = Field(
+        default=None,
+        description="Arbitrary JSON metadata for the collection. This can be used to store application-specific information such as creation time, migration data, inference model info, etc.",
+    )


 class CollectionConfigTelemetry(BaseModel):
@@ -203,6 +269,7 @@ class CollectionConfigTelemetry(BaseModel):
     quantization_config: Optional["QuantizationConfig"] = Field(default=None, description="")
     strict_mode_config: Optional["StrictModeConfigOutput"] = Field(default=None, description="")
     uuid: Optional[UUID] = Field(default=None, description="")
+    metadata: Optional["Payload"] = Field(default=None, description="Arbitrary JSON metadata for the collection")


 class CollectionDescription(BaseModel):
@@ -226,9 +293,8 @@ class CollectionInfo(BaseModel):
     optimizer_status: "OptimizersStatus" = Field(
         ..., description="Current statistics and configuration of the collection"
     )
-    vectors_count: Optional[int] = Field(
-        default=None,
-        description="DEPRECATED: Approximate number of vectors in collection. All vectors in collection are available for querying. Calculated as `points_count x vectors_per_point`. Where `vectors_per_point` is a number of named vectors in schema.",
+    warnings: Optional[List["CollectionWarning"]] = Field(
+        default=None, description="Warnings related to the collection"
     )
     indexed_vectors_count: Optional[int] = Field(
         default=None,
@@ -285,6 +351,13 @@ class CollectionParamsDiff(BaseModel, extra="forbid"):
     )


+class CollectionSnapshotTelemetry(BaseModel):
+    id: str = Field(..., description="")
+    running_snapshots: Optional[int] = Field(default=None, description="")
+    running_snapshot_recovery: Optional[int] = Field(default=None, description="")
+    total_snapshot_creations: Optional[int] = Field(default=None, description="")
+
+
 class CollectionStatus(str, Enum):
     """
     Current state of the collection. `Green` - all good. `Yellow` - optimization is running, 'Grey' - optimizations are possible but not triggered, `Red` - some operations failed and was not recovered
@@ -309,6 +382,10 @@ class CollectionTelemetry(BaseModel):
     shard_clean_tasks: Optional[Dict[str, "ShardCleanStatusTelemetry"]] = Field(default=None, description="")


+class CollectionWarning(BaseModel):
+    message: str = Field(..., description="Warning message")
+
+
 class CollectionsAggregatedTelemetry(BaseModel):
     vectors: int = Field(..., description="")
     optimizers_status: "OptimizersStatus" = Field(..., description="")
@@ -327,6 +404,7 @@ class CollectionsTelemetry(BaseModel):
     number_of_collections: int = Field(..., description="")
     max_collections: Optional[int] = Field(default=None, description="")
     collections: Optional[List["CollectionTelemetryEnum"]] = Field(default=None, description="")
+    snapshots: Optional[List["CollectionSnapshotTelemetry"]] = Field(default=None, description="")


 class CompressionRatio(str, Enum):
@@ -460,9 +538,6 @@ class CreateCollection(BaseModel, extra="forbid"):
         default=None,
         description="Custom params for Optimizers. If none - values from service configuration file are used.",
     )
-    init_from: Optional["InitFrom"] = Field(
-        default=None, description="Specify other collection to copy data from. Deprecated since Qdrant 1.15.0."
-    )
     quantization_config: Optional["QuantizationConfig"] = Field(
         default=None, description="Quantization parameters. If none - quantization is disabled."
     )
@@ -470,6 +545,10 @@ class CreateCollection(BaseModel, extra="forbid"):
         default=None, description="Sparse vector data config."
     )
     strict_mode_config: Optional["StrictModeConfig"] = Field(default=None, description="Strict-mode config.")
+    metadata: Optional["Payload"] = Field(
+        default=None,
+        description="Arbitrary JSON metadata for the collection. This can be used to store application-specific information such as creation time, migration data, inference model info, etc.",
+    )
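Collection-level metadata shows up here on `CreateCollection` and, further down, on `UpdateCollection`, where it merges into what is already stored. A minimal round-trip sketch, assuming the high-level client forwards the new `metadata` kwarg the way the local implementation in this diff does; the keys are application-defined placeholders:

    from qdrant_client import QdrantClient, models

    client = QdrantClient(url="http://localhost:6333")

    client.create_collection(
        collection_name="my_collection",
        vectors_config=models.VectorParams(size=4, distance=models.Distance.COSINE),
        metadata={"created_by": "migration-v2", "embedding_model": "bge-small"},
    )

    # Later updates merge with, rather than replace, the stored metadata.
    client.update_collection(
        collection_name="my_collection",
        metadata={"migrated_at": "2024-01-01"},
    )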
 class CreateFieldIndex(BaseModel, extra="forbid"):
@@ -491,6 +570,10 @@ class CreateShardingKey(BaseModel, extra="forbid"):
         default=None,
         description="Placement of shards for this key List of peer ids, that can be used to place shards for this key If not specified, will be randomly placed among all peers",
     )
+    initial_state: Optional["ReplicaState"] = Field(
+        default=None,
+        description="Initial state of the shards for this key. If not specified, will be `Initializing` first and then `Active`. Warning: do not change this unless you know what you are doing.",
+    )


 class CreateShardingKeyOperation(BaseModel, extra="forbid"):
@@ -546,7 +629,7 @@ class DecayParamsExpression(BaseModel, extra="forbid"):
     )
     midpoint: Optional[float] = Field(
         default=None,
-        description="The midpoint of the decay. Defaults to 0.5. Output will be this value when `|x - target| == scale`.",
+        description="The midpoint of the decay. Should be between 0 and 1. Defaults to 0.5. Output will be this value when `|x - target| == scale`.",
     )


@@ -699,12 +782,16 @@ class Document(BaseModel, extra="forbid"):
     WARN: Work-in-progress, unimplemented  Text document for embedding. Requires inference infrastructure, unimplemented.
     """

-    text: str = Field(..., description="Text of the document This field will be used as input for the embedding model")
+    text: str = Field(
+        ..., description="Text of the document. This field will be used as input for the embedding model."
+    )
     model: str = Field(
-        ..., description="Name of the model used to generate the vector List of available models depends on a provider"
+        ...,
+        description="Name of the model used to generate the vector. List of available models depends on a provider.",
     )
-    options: Optional[Dict[str, Any]] = Field(
-        default=None, description="Parameters for the model Values of the parameters are model-specific"
+    options: Optional["DocumentOptions"] = Field(
+        default=None,
+        description="Additional options for the model, will be passed to the inference service as-is. See model cards for available options.",
     )
@@ -767,14 +854,15 @@ class FeatureFlags(BaseModel):
     )
     payload_index_skip_rocksdb: Optional[bool] = Field(
         default=True,
-        description="Skip usage of RocksDB in new immutable payload indices. First implemented in Qdrant 1.13.5. Enabled by default in Qdrant 1.14.1",
+        description="Skip usage of RocksDB in new immutable payload indices. First implemented in Qdrant 1.13.5. Enabled by default in Qdrant 1.14.1.",
     )
     payload_index_skip_mutable_rocksdb: Optional[bool] = Field(
-        default=False, description="Skip usage of RocksDB in new mutable payload indices."
+        default=True,
+        description="Skip usage of RocksDB in new mutable payload indices. First implemented in Qdrant 1.15.0. Enabled by default in Qdrant 1.16.0.",
     )
     payload_storage_skip_rocksdb: Optional[bool] = Field(
-        default=False,
-        description="Skip usage of RocksDB in new payload storages. On-disk payload storages never use Gridstore. First implemented in Qdrant 1.15.0.",
+        default=True,
+        description="Skip usage of RocksDB in new payload storages. On-disk payload storages never use Gridstore. First implemented in Qdrant 1.15.0. Enabled by default in Qdrant 1.16.0.",
     )
     incremental_hnsw_building: Optional[bool] = Field(
         default=True, description="Use incremental HNSW building. Enabled by default in Qdrant 1.14.1."
@@ -793,6 +881,10 @@ class FeatureFlags(BaseModel):
         default=False,
         description="Migrate RocksDB based payload indices into new format on start. Rebuilds a new payload index from scratch.",
     )
+    appendable_quantization: Optional[bool] = Field(
+        default=True,
+        description="Use appendable quantization in appendable plain segments. Enabled by default in Qdrant 1.16.0.",
+    )


 class FieldCondition(BaseModel, extra="forbid"):
@@ -857,7 +949,7 @@ class FormulaQuery(BaseModel, extra="forbid"):

 class Fusion(str, Enum):
     """
-    Fusion algorithm allows to combine results of multiple prefetches. Available fusion algorithms: * `rrf` - Reciprocal Rank Fusion * `dbsf` - Distribution-Based Score Fusion
+    Fusion algorithm allows combining results of multiple prefetches. Available fusion algorithms: * `rrf` - Reciprocal Rank Fusion (with default parameters) * `dbsf` - Distribution-Based Score Fusion
     """

     def __str__(self) -> str:
@@ -1015,7 +1107,7 @@ class HnswConfig(BaseModel):
     )
     full_scan_threshold: int = Field(
         ...,
-        description="Minimal size (in KiloBytes) of vectors for additional payload-based indexing. If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used - in this case full-scan search should be preferred by query planner and additional indexing is not required. Note: 1Kb = 1 vector of size 256",
+        description="Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search. This measures the total size of vectors being queried against. When the maximum estimated amount of points that a condition satisfies is smaller than `full_scan_threshold_kb`, the query planner will use full-scan search instead of HNSW index traversal for better performance. Note: 1Kb = 1 vector of size 256",
     )
     max_indexing_threads: Optional[int] = Field(
         default=0,
@@ -1029,6 +1121,10 @@ class HnswConfig(BaseModel):
         default=None,
         description="Custom M param for hnsw graph built for payload index. If not set, default M will be used.",
     )
+    inline_storage: Optional[bool] = Field(
+        default=None,
+        description="Store copies of original and quantized vectors within the HNSW index file. Default: false. Enabling this option will trade disk usage for search speed by reducing the amount of random seeks during the search. Requires quantized vectors to be enabled. Multi-vectors are not supported.",
+    )


 class HnswConfigDiff(BaseModel, extra="forbid"):
@@ -1042,7 +1138,7 @@ class HnswConfigDiff(BaseModel, extra="forbid"):
     )
     full_scan_threshold: Optional[int] = Field(
         default=None,
-        description="Minimal size (in kilobytes) of vectors for additional payload-based indexing. If payload chunk is smaller than `full_scan_threshold_kb` additional indexing won't be used - in this case full-scan search should be preferred by query planner and additional indexing is not required. Note: 1Kb = 1 vector of size 256",
+        description="Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search. This measures the total size of vectors being queried against. When the maximum estimated amount of points that a condition satisfies is smaller than `full_scan_threshold_kb`, the query planner will use full-scan search instead of HNSW index traversal for better performance. Note: 1Kb = 1 vector of size 256",
     )
     max_indexing_threads: Optional[int] = Field(
         default=None,
@@ -1056,6 +1152,10 @@ class HnswConfigDiff(BaseModel, extra="forbid"):
         default=None,
         description="Custom M param for additional payload-aware HNSW links. If not set, default M will be used.",
     )
+    inline_storage: Optional[bool] = Field(
+        default=None,
+        description="Store copies of original and quantized vectors within the HNSW index file. Default: false. Enabling this option will trade disk usage for search speed by reducing the amount of random seeks during the search. Requires quantized vectors to be enabled. Multi-vectors are not supported.",
+    )


 class HnswGlobalConfig(BaseModel):
@@ -1072,7 +1172,8 @@ class Image(BaseModel, extra="forbid"):

     image: Any = Field(..., description="Image data: base64 encoded image or an URL")
     model: str = Field(
-        ..., description="Name of the model used to generate the vector List of available models depends on a provider"
+        ...,
+        description="Name of the model used to generate the vector.
List of available models depends on a provider.", ) options: Optional[Dict[str, Any]] = Field( default=None, description="Parameters for the model Values of the parameters are model-specific" @@ -1116,10 +1217,11 @@ class InferenceObject(BaseModel, extra="forbid"): object: Any = Field( ..., - description="Arbitrary data, used as input for the embedding model Used if the model requires more than one input or a custom input", + description="Arbitrary data, used as input for the embedding model. Used if the model requires more than one input or a custom input.", ) model: str = Field( - ..., description="Name of the model used to generate the vector List of available models depends on a provider" + ..., + description="Name of the model used to generate the vector. List of available models depends on a provider.", ) options: Optional[Dict[str, Any]] = Field( default=None, description="Parameters for the model Values of the parameters are model-specific" @@ -1130,16 +1232,6 @@ class InferenceUsage(BaseModel): models: Dict[str, "ModelUsage"] = Field(..., description="") -class InitFrom(BaseModel, extra="forbid"): - """ - Operation for creating new collection and (optionally) specify index params - """ - - collection: str = Field( - ..., description="Operation for creating new collection and (optionally) specify index params" - ) - - class InlineResponse200(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") @@ -1155,116 +1247,110 @@ class InlineResponse2001(BaseModel): class InlineResponse20010(BaseModel): - time: Optional[float] = Field(default=None, description="Time spent to process this request") - status: Optional[str] = Field(default=None, description="") - result: Optional[bool] = Field(default=None, description="") - - -class InlineResponse20011(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional[List["SnapshotDescription"]] = Field(default=None, description="") -class InlineResponse20012(BaseModel): +class InlineResponse20011(BaseModel): time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional["SnapshotDescription"] = Field(default=None, description="") -class InlineResponse20013(BaseModel): +class InlineResponse20012(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional["Record"] = Field(default=None, description="") -class InlineResponse20014(BaseModel): +class InlineResponse20013(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional[List["Record"]] = Field(default=None, description="") -class InlineResponse20015(BaseModel): +class InlineResponse20014(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: 
Optional[List["UpdateResult"]] = Field(default=None, description="") -class InlineResponse20016(BaseModel): +class InlineResponse20015(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional["ScrollResult"] = Field(default=None, description="") -class InlineResponse20017(BaseModel): +class InlineResponse20016(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional[List["ScoredPoint"]] = Field(default=None, description="") -class InlineResponse20018(BaseModel): +class InlineResponse20017(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional[List[List["ScoredPoint"]]] = Field(default=None, description="") -class InlineResponse20019(BaseModel): +class InlineResponse20018(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional["GroupsResult"] = Field(default=None, description="") -class InlineResponse2002(BaseModel): +class InlineResponse20019(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") - result: Optional["LocksOption"] = Field(default=None, description="") + result: Optional["CountResult"] = Field(default=None, description="") -class InlineResponse20020(BaseModel): +class InlineResponse2002(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") - result: Optional["CountResult"] = Field(default=None, description="") + result: Optional["ClusterStatus"] = Field(default=None, description="") -class InlineResponse20021(BaseModel): +class InlineResponse20020(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional["FacetResponse"] = Field(default=None, description="") -class InlineResponse20022(BaseModel): +class InlineResponse20021(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional["QueryResponse"] = Field(default=None, description="") -class InlineResponse20023(BaseModel): +class InlineResponse20022(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional[List["QueryResponse"]] = Field(default=None, description="") -class 
InlineResponse20024(BaseModel): +class InlineResponse20023(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") result: Optional["SearchMatrixPairsResponse"] = Field(default=None, description="") -class InlineResponse20025(BaseModel): +class InlineResponse20024(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") @@ -1275,49 +1361,48 @@ class InlineResponse2003(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") - result: Optional["ClusterStatus"] = Field(default=None, description="") + result: Optional["CollectionsResponse"] = Field(default=None, description="") class InlineResponse2004(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") - result: Optional["CollectionsResponse"] = Field(default=None, description="") + result: Optional["CollectionInfo"] = Field(default=None, description="") class InlineResponse2005(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") - result: Optional["CollectionInfo"] = Field(default=None, description="") + result: Optional["UpdateResult"] = Field(default=None, description="") class InlineResponse2006(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") - result: Optional["UpdateResult"] = Field(default=None, description="") + result: Optional["CollectionExistence"] = Field(default=None, description="") class InlineResponse2007(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") - result: Optional["CollectionExistence"] = Field(default=None, description="") + result: Optional["CollectionClusterInfo"] = Field(default=None, description="") class InlineResponse2008(BaseModel): usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") - result: Optional["CollectionClusterInfo"] = Field(default=None, description="") + result: Optional["CollectionsAliasesResponse"] = Field(default=None, description="") class InlineResponse2009(BaseModel): - usage: Optional["Usage"] = Field(default=None, description="") time: Optional[float] = Field(default=None, description="Time spent to process this request") status: Optional[str] = Field(default=None, description="") - result: Optional["CollectionsAliasesResponse"] = Field(default=None, description="") + result: Optional[bool] = Field(default=None, 
description="") class InlineResponse202(BaseModel): @@ -1438,14 +1523,14 @@ class LocalShardTelemetry(BaseModel): default=None, description="Sum of number of vectors in all segments This is an approximate number Do NOT rely on this number unless you know what you are doing", ) + num_vectors_by_name: Optional[Dict[str, int]] = Field( + default=None, + description="Sum of number of vectors across all segments, grouped by their name. This is an approximate number. Do NOT rely on this number unless you know what you are doing", + ) segments: Optional[List["SegmentTelemetry"]] = Field(default=None, description="") optimizations: "OptimizerTelemetry" = Field(..., description="") async_scorer: Optional[bool] = Field(default=None, description="") - - -class LocksOption(BaseModel, extra="forbid"): - error_message: Optional[str] = Field(default=None, description="") - write: bool = Field(..., description="") + indexed_only_excluded_vectors: Optional[Dict[str, int]] = Field(default=None, description="") class Log10Expression(BaseModel, extra="forbid"): @@ -1502,6 +1587,14 @@ class MatchText(BaseModel, extra="forbid"): text: str = Field(..., description="Full-text match of the strings.") +class MatchTextAny(BaseModel, extra="forbid"): + """ + Full-text match of at least one token of the string. + """ + + text_any: str = Field(..., description="Full-text match of at least one token of the string.") + + class MatchValue(BaseModel, extra="forbid"): """ Exact match of the given value @@ -1858,6 +1951,14 @@ class PeerInfo(BaseModel): uri: str = Field(..., description="Information of a peer in the cluster") +class PeerMetadata(BaseModel): + """ + Metadata describing extra properties for each peer + """ + + version: str = Field(..., description="Peer Qdrant version") + + class PointGroup(BaseModel): hits: List["ScoredPoint"] = Field(..., description="Scored points that have the same value of the group_by key") id: "GroupId" = Field(..., description="") @@ -1895,11 +1996,19 @@ class PointVectors(BaseModel, extra="forbid"): class PointsBatch(BaseModel, extra="forbid"): batch: "Batch" = Field(..., description="") shard_key: Optional["ShardKeySelector"] = Field(default=None, description="") + update_filter: Optional["Filter"] = Field( + default=None, + description="If specified, only points that match this filter will be updated, others will be inserted", + ) class PointsList(BaseModel, extra="forbid"): points: List["PointStruct"] = Field(..., description="") shard_key: Optional["ShardKeySelector"] = Field(default=None, description="") + update_filter: Optional["Filter"] = Field( + default=None, + description="If specified, only points that match this filter will be updated, others will be inserted", + ) class PowExpression(BaseModel, extra="forbid"): @@ -2285,6 +2394,17 @@ def __str__(self) -> str: RECOVERY = "Recovery" RESHARDING = "Resharding" RESHARDINGSCALEDOWN = "ReshardingScaleDown" + ACTIVEREAD = "ActiveRead" + + +class ReplicatePoints(BaseModel, extra="forbid"): + filter: Optional["Filter"] = Field(default=None, description="") + from_shard_key: "ShardKey" = Field(..., description="") + to_shard_key: "ShardKey" = Field(..., description="") + + +class ReplicatePointsOperation(BaseModel, extra="forbid"): + replicate_points: "ReplicatePoints" = Field(..., description="") class ReplicateShard(BaseModel, extra="forbid"): @@ -2345,6 +2465,18 @@ class RestartTransferOperation(BaseModel, extra="forbid"): restart_transfer: "RestartTransfer" = Field(..., description="") +class Rrf(BaseModel, extra="forbid"): + 
""" + Parameters for Reciprocal Rank Fusion + """ + + k: Optional[int] = Field(default=None, description="K parameter for reciprocal rank fusion") + + +class RrfQuery(BaseModel, extra="forbid"): + rrf: "Rrf" = Field(..., description="") + + class RunningEnvironmentTelemetry(BaseModel): distribution: Optional[str] = Field(default=None, description="") distribution_version: Optional[str] = Field(default=None, description="") @@ -2517,6 +2649,7 @@ class SearchParams(BaseModel, extra="forbid"): default=False, description="If enabled, the engine will only perform search among indexed or small segments. Using this option prevents slow searches in case of delayed index, but does not guarantee that all uploaded vectors will be included in search results", ) + acorn: Optional["AcornSearchParams"] = Field(default=None, description="ACORN search params") class SearchRequest(BaseModel, extra="forbid"): @@ -2653,6 +2786,11 @@ class ShardCleanStatusTelemetryOneOf2(BaseModel): failed: "ShardCleanStatusFailedTelemetry" = Field(..., description="") +class ShardKeyWithFallback(BaseModel, extra="forbid"): + target: "ShardKey" = Field(..., description="") + fallback: "ShardKey" = Field(..., description="") + + class ShardSnapshotRecover(BaseModel, extra="forbid"): location: "ShardSnapshotLocation" = Field(..., description="") priority: Optional["SnapshotPriority"] = Field(default=None, description="") @@ -2905,6 +3043,9 @@ class SparseVectorDataConfig(BaseModel): storage_type: Optional["SparseVectorStorageType"] = Field( default=None, description="Config of single sparse vector data storage" ) + modifier: Optional["Modifier"] = Field( + default=None, description="Configures addition value modifications for sparse vectors. Default: none" + ) class SparseVectorParams(BaseModel, extra="forbid"): @@ -2971,8 +3112,13 @@ def __str__(self) -> str: class StopwordsSet(BaseModel, extra="forbid"): - languages: Optional[List["Language"]] = Field(default=None, description="") - custom: Optional[List[str]] = Field(default=None, description="") + languages: Optional[List["Language"]] = Field( + default=None, + description="Set of languages to use for stopwords. Multiple pre-defined lists of stopwords can be combined.", + ) + custom: Optional[List[str]] = Field( + default=None, description="Custom stopwords set. Will be merged with the languages set." + ) class StrictModeConfig(BaseModel, extra="forbid"): @@ -2987,8 +3133,10 @@ class StrictModeConfig(BaseModel, extra="forbid"): unindexed_filtering_update: Optional[bool] = Field( default=None, description="Allow usage of unindexed fields in filtered updates (e.g. delete by payload)." ) - search_max_hnsw_ef: Optional[int] = Field(default=None, description="Max HNSW value allowed in search parameters.") - search_allow_exact: Optional[bool] = Field(default=None, description="Whether exact search is allowed or not.") + search_max_hnsw_ef: Optional[int] = Field( + default=None, description="Max HNSW ef value allowed in search parameters." + ) + search_allow_exact: Optional[bool] = Field(default=None, description="Whether exact search is allowed.") search_max_oversampling: Optional[float] = Field( default=None, description="Max oversampling value allowed in search." ) @@ -3011,9 +3159,14 @@ class StrictModeConfig(BaseModel, extra="forbid"): default=None, description="Max size of a condition, eg. items in `MatchAny`." 
     )
     multivector_config: Optional["StrictModeMultivectorConfig"] = Field(
-        default=None, description="Multivector configuration"
+        default=None, description="Multivector strict mode configuration"
+    )
+    sparse_config: Optional["StrictModeSparseConfig"] = Field(
+        default=None, description="Sparse vector strict mode configuration"
+    )
+    max_payload_index_count: Optional[int] = Field(
+        default=None, description="Max number of payload indexes in a collection"
     )
-    sparse_config: Optional["StrictModeSparseConfig"] = Field(default=None, description="Sparse vector configuration")


 class StrictModeConfigOutput(BaseModel):
@@ -3057,6 +3210,9 @@ class StrictModeConfigOutput(BaseModel):
     sparse_config: Optional["StrictModeSparseConfigOutput"] = Field(
         default=None, description="Sparse vector configuration"
     )
+    max_payload_index_count: Optional[int] = Field(
+        default=None, description="Max number of payload indexes in a collection"
+    )


 class StrictModeMultivector(BaseModel, extra="forbid"):
@@ -3095,6 +3251,10 @@ class TextIndexParams(BaseModel, extra="forbid"):
     min_token_len: Optional[int] = Field(default=None, description="Minimum characters to be tokenized.")
     max_token_len: Optional[int] = Field(default=None, description="Maximum characters to be tokenized.")
     lowercase: Optional[bool] = Field(default=None, description="If true, lowercase all tokens. Default: true.")
+    ascii_folding: Optional[bool] = Field(
+        default=None,
+        description="If true, normalize tokens by folding accented characters to ASCII (e.g., 'ação' -> 'acao'). Default: false.",
+    )
     phrase_matching: Optional[bool] = Field(
         default=None, description="If true, support phrase matching. Default: false."
     )
@@ -3172,6 +3332,10 @@ class UpdateCollection(BaseModel, extra="forbid"):
     strict_mode_config: Optional["StrictModeConfig"] = Field(
         default=None, description="Operation for updating parameters of the existing collection"
     )
+    metadata: Optional["Payload"] = Field(
+        default=None,
+        description="Metadata to update for the collection. If provided, this will merge with existing metadata. To remove metadata, set it to an empty object.",
+    )


 class UpdateOperations(BaseModel, extra="forbid"):
@@ -3198,6 +3362,7 @@ def __str__(self) -> str:

 class UpdateVectors(BaseModel, extra="forbid"):
     points: List["PointVectors"] = Field(..., description="Points with named vectors")
     shard_key: Optional["ShardKeySelector"] = Field(default=None, description="")
+    update_filter: Optional["Filter"] = Field(default=None, description="")


 class UpdateVectorsOperation(BaseModel, extra="forbid"):
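`update_filter` now appears on `PointsBatch`, `PointsList`, and `UpdateVectors`, giving upserts conditional semantics. A minimal sketch, assuming the public `upsert` forwards the keyword the way the local client in this diff does; ids, payloads, and the filter are illustrative:

    from qdrant_client import QdrantClient, models

    client = QdrantClient(url="http://localhost:6333")

    # Per the field description above: only points matching the filter are
    # updated; the remaining points are inserted as new ones.
    client.upsert(
        collection_name="my_collection",
        points=[
            models.PointStruct(id=1, vector=[0.2, 0.1, 0.9, 0.7], payload={"version": 2}),
        ],
        update_filter=models.Filter(
            must=[models.FieldCondition(key="version", match=models.MatchValue(value=1))]
        ),
    )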
@@ -3387,6 +3552,7 @@ class VersionInfo(BaseModel):
 class WalConfig(BaseModel):
     wal_capacity_mb: int = Field(..., description="Size of a single WAL segment in MB")
     wal_segments_ahead: int = Field(..., description="Number of WAL segments to create ahead of actually used ones")
+    wal_retain_closed: Optional[int] = Field(default=1, description="Number of closed WAL segments to keep")


 class WalConfigDiff(BaseModel, extra="forbid"):
@@ -3394,6 +3560,7 @@ class WalConfigDiff(BaseModel, extra="forbid"):
     wal_segments_ahead: Optional[int] = Field(
         default=None, description="Number of WAL segments to create ahead of actually used ones"
     )
+    wal_retain_closed: Optional[int] = Field(default=None, description="Number of closed WAL segments to retain")


 class WebApiTelemetry(BaseModel):
@@ -3442,6 +3609,7 @@ def __str__(self) -> str:
     RestartTransferOperation,
     StartReshardingOperation,
     AbortReshardingOperation,
+    ReplicatePointsOperation,
 ]
 ClusterStatus = Union[
     ClusterStatusOneOf,
@@ -3469,9 +3637,13 @@ def __str__(self) -> str:
     ContextPair,
     List[ContextPair],
 ]
+DocumentOptions = Union[
+    Dict[StrictStr, Any],
+    Bm25Config,
+]
 ExtendedPointId = Union[
     StrictInt,
-    StrictStr,
+    Union[StrictStr, UUID],
 ]
 FacetValue = Union[
     StrictBool,
@@ -3489,6 +3661,7 @@ def __str__(self) -> str:
 Match = Union[
     MatchValue,
     MatchText,
+    MatchTextAny,
     MatchPhrase,
     MatchAny,
     MatchExcept,
@@ -3560,6 +3733,7 @@ def __str__(self) -> str:
     ContextQuery,
     OrderByQuery,
     FusionQuery,
+    RrfQuery,
     FormulaQuery,
     SampleQuery,
 ]
@@ -3710,6 +3884,7 @@ def __str__(self) -> str:
 ShardKeySelector = Union[
     ShardKey,
     List[ShardKey],
+    ShardKeyWithFallback,
 ]
 VectorInput = Union[
     List[StrictFloat],
diff --git a/qdrant_client/hybrid/fusion.py b/qdrant_client/hybrid/fusion.py
index 1c8f280a..561d630a 100644
--- a/qdrant_client/hybrid/fusion.py
+++ b/qdrant_client/hybrid/fusion.py
@@ -1,13 +1,20 @@
+from typing import Optional
+
 from qdrant_client.http import models

+DEFAULT_RANKING_CONSTANT_K = 2
+
+
 def reciprocal_rank_fusion(
-    responses: list[list[models.ScoredPoint]], limit: int = 10
+    responses: list[list[models.ScoredPoint]],
+    limit: int = 10,
+    ranking_constant_k: Optional[int] = None,
 ) -> list[models.ScoredPoint]:
     def compute_score(pos: int) -> float:
         ranking_constant = (
-            2  # the constant mitigates the impact of high rankings by outlier systems
-        )
+            ranking_constant_k if ranking_constant_k is not None else DEFAULT_RANKING_CONSTANT_K
+        )  # mitigates the impact of high rankings by outlier systems
         return 1 / (ranking_constant + pos)

     scores: dict[models.ExtendedPointId, float] = {}
diff --git a/qdrant_client/local/async_qdrant_local.py b/qdrant_client/local/async_qdrant_local.py
index 0f46715d..07bbaa6a 100644
--- a/qdrant_client/local/async_qdrant_local.py
+++ b/qdrant_client/local/async_qdrant_local.py
@@ -14,6 +14,7 @@
 import json
 import os
 import shutil
+import uuid
 from copy import deepcopy
 from io import TextIOWrapper
 from typing import Any, Generator, Iterable, Mapping, Optional, Sequence, Union, get_args
@@ -25,7 +26,6 @@
 from
qdrant_client.async_client_base import AsyncQdrantBase from qdrant_client.conversions import common_types as types from qdrant_client.http import models as rest_models -from qdrant_client.http.models.models import RecommendExample from qdrant_client.local.local_collection import ( LocalCollection, DEFAULT_VECTOR_NAME, @@ -159,24 +159,7 @@ def _get_collection(self, collection_name: str) -> LocalCollection: return self.collections[self.aliases[collection_name]] raise ValueError(f"Collection {collection_name} not found") - async def search_batch( - self, collection_name: str, requests: Sequence[types.SearchRequest], **kwargs: Any - ) -> list[list[types.ScoredPoint]]: - collection = self._get_collection(collection_name) - return [ - collection.search( - query_vector=request.vector, - query_filter=request.filter, - limit=request.limit, - offset=request.offset, - with_payload=request.with_payload, - with_vectors=request.with_vector, - score_threshold=request.score_threshold, - ) - for request in requests - ] - - async def search( + def search( self, collection_name: str, query_vector: Union[ @@ -234,43 +217,6 @@ async def search_matrix_pairs( query_filter=query_filter, limit=limit, sample=sample, using=using ) - async def search_groups( - self, - collection_name: str, - query_vector: Union[ - types.NumpyArray, Sequence[float], tuple[str, list[float]], types.NamedVector - ], - group_by: str, - query_filter: Optional[rest_models.Filter] = None, - search_params: Optional[rest_models.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - with_payload: Union[bool, Sequence[str], rest_models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - **kwargs: Any, - ) -> types.GroupsResult: - collection = self._get_collection(collection_name) - with_lookup_collection = None - if with_lookup is not None: - if isinstance(with_lookup, str): - with_lookup_collection = self._get_collection(with_lookup) - else: - with_lookup_collection = self._get_collection(with_lookup.collection) - return collection.search_groups( - query_vector=query_vector, - query_filter=query_filter, - limit=limit, - group_by=group_by, - group_size=group_size, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - with_lookup=with_lookup, - with_lookup_collection=with_lookup_collection, - ) - def _resolve_query_input( self, collection_name: str, @@ -302,6 +248,8 @@ def _resolve_query_input( def input_into_vector(vector_input: types.VectorInput) -> types.VectorInput: if isinstance(vector_input, get_args(types.PointId)): + if isinstance(vector_input, uuid.UUID): + vector_input = str(vector_input) point_id = vector_input if point_id not in collection.ids: raise ValueError(f"Point {point_id} is not found in the collection") @@ -357,6 +305,8 @@ def input_into_vector(vector_input: types.VectorInput) -> types.VectorInput: pass elif isinstance(query, rest_models.FusionQuery): pass + elif isinstance(query, rest_models.RrfQuery): + pass return (query, mentioned_ids) def _resolve_prefetches_input( @@ -507,170 +457,6 @@ async def query_points_groups( with_lookup_collection=with_lookup_collection, ) - async def recommend_batch( - self, collection_name: str, requests: Sequence[types.RecommendRequest], **kwargs: Any - ) -> list[list[types.ScoredPoint]]: - collection = self._get_collection(collection_name) - return [ - collection.recommend( - positive=request.positive, - 
negative=request.negative, - query_filter=request.filter, - limit=request.limit, - offset=request.offset, - with_payload=request.with_payload, - with_vectors=request.with_vector, - score_threshold=request.score_threshold, - using=request.using, - lookup_from_collection=self._get_collection(request.lookup_from.collection) - if request.lookup_from - else None, - lookup_from_vector_name=request.lookup_from.vector - if request.lookup_from - else None, - strategy=request.strategy, - ) - for request in requests - ] - - async def recommend( - self, - collection_name: str, - positive: Optional[Sequence[RecommendExample]] = None, - negative: Optional[Sequence[RecommendExample]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - score_threshold: Optional[float] = None, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - strategy: Optional[types.RecommendStrategy] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - collection = self._get_collection(collection_name) - return collection.recommend( - positive=positive, - negative=negative, - query_filter=query_filter, - limit=limit, - offset=offset, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - using=using, - lookup_from_collection=self._get_collection(lookup_from.collection) - if lookup_from - else None, - lookup_from_vector_name=lookup_from.vector if lookup_from else None, - strategy=strategy, - ) - - async def recommend_groups( - self, - collection_name: str, - group_by: str, - positive: Optional[Sequence[Union[types.PointId, list[float]]]] = None, - negative: Optional[Sequence[Union[types.PointId, list[float]]]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - score_threshold: Optional[float] = None, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - strategy: Optional[types.RecommendStrategy] = None, - **kwargs: Any, - ) -> types.GroupsResult: - collection = self._get_collection(collection_name) - with_lookup_collection = None - if with_lookup is not None: - if isinstance(with_lookup, str): - with_lookup_collection = self._get_collection(with_lookup) - else: - with_lookup_collection = self._get_collection(with_lookup.collection) - return collection.recommend_groups( - positive=positive, - negative=negative, - group_by=group_by, - group_size=group_size, - query_filter=query_filter, - limit=limit, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - using=using, - lookup_from_collection=self._get_collection(lookup_from.collection) - if lookup_from - else None, - lookup_from_vector_name=lookup_from.vector if lookup_from else None, - with_lookup=with_lookup, - with_lookup_collection=with_lookup_collection, - strategy=strategy, - ) - - async def discover( - self, - collection_name: str, - target: Optional[types.TargetVector] = None, - context: Optional[Sequence[types.ContextExamplePair]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = 
None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - collection = self._get_collection(collection_name) - return collection.discover( - target=target, - context=context, - query_filter=query_filter, - limit=limit, - offset=offset, - with_payload=with_payload, - with_vectors=with_vectors, - using=using, - lookup_from_collection=self._get_collection(lookup_from.collection) - if lookup_from - else None, - lookup_from_vector_name=lookup_from.vector if lookup_from else None, - ) - - async def discover_batch( - self, collection_name: str, requests: Sequence[types.DiscoverRequest], **kwargs: Any - ) -> list[list[types.ScoredPoint]]: - collection = self._get_collection(collection_name) - return [ - collection.discover( - target=request.target, - context=request.context, - query_filter=request.filter, - limit=request.limit, - offset=request.offset, - with_payload=request.with_payload, - with_vectors=request.with_vector, - using=request.using, - lookup_from_collection=self._get_collection(request.lookup_from.collection) - if request.lookup_from - else None, - lookup_from_vector_name=request.lookup_from.vector - if request.lookup_from - else None, - ) - for request in requests - ] - async def scroll( self, collection_name: str, @@ -715,17 +501,25 @@ async def facet( return collection.facet(key=key, facet_filter=facet_filter, limit=limit) async def upsert( - self, collection_name: str, points: types.Points, **kwargs: Any + self, + collection_name: str, + points: types.Points, + update_filter: Optional[types.Filter] = None, + **kwargs: Any, ) -> types.UpdateResult: collection = self._get_collection(collection_name) - collection.upsert(points) + collection.upsert(points, update_filter=update_filter) return self._default_update_result() async def update_vectors( - self, collection_name: str, points: Sequence[types.PointVectors], **kwargs: Any + self, + collection_name: str, + points: Sequence[types.PointVectors], + update_filter: Optional[types.Filter] = None, + **kwargs: Any, ) -> types.UpdateResult: collection = self._get_collection(collection_name) - collection.update_vectors(points) + collection.update_vectors(points, update_filter=update_filter) return self._default_update_result() async def delete_vectors( @@ -882,14 +676,23 @@ async def update_collection( self, collection_name: str, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: _collection = self._get_collection(collection_name) + updated = False if sparse_vectors_config is not None: for vector_name, vector_params in sparse_vectors_config.items(): _collection.update_sparse_vectors_config(vector_name, vector_params) - return True - return False + updated = True + if metadata is not None: + if _collection.config.metadata is not None: + _collection.config.metadata.update(metadata) + else: + _collection.config.metadata = deepcopy(metadata) + updated = True + self._save() + return updated def _collection_path(self, collection_name: str) -> Optional[str]: if self.persistent: @@ -919,19 +722,12 @@ async def create_collection( vectors_config: Optional[ Union[types.VectorParams, Mapping[str, types.VectorParams]] ] = None, - 
init_from: Optional[types.InitFrom] = None, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: if self.closed: raise RuntimeError("QdrantLocal instance is closed. Please create a new instance.") - src_collection = None - from_collection_name = None - if init_from is not None: - from_collection_name = ( - init_from if isinstance(init_from, str) else init_from.collection - ) - src_collection = self._get_collection(from_collection_name) if collection_name in self.collections: raise ValueError(f"Collection {collection_name} already exists") collection_path = self._collection_path(collection_name) @@ -939,23 +735,14 @@ async def create_collection( os.makedirs(collection_path, exist_ok=True) collection = LocalCollection( rest_models.CreateCollection( - vectors=vectors_config or {}, sparse_vectors=sparse_vectors_config + vectors=vectors_config or {}, + sparse_vectors=sparse_vectors_config, + metadata=deepcopy(metadata), ), location=collection_path, force_disable_check_same_thread=self.force_disable_check_same_thread, ) self.collections[collection_name] = collection - if src_collection and from_collection_name: - batch_size = 100 - (records, next_offset) = await self.scroll( - from_collection_name, limit=2, with_vectors=True - ) - self.upload_records(collection_name, records) - while next_offset is not None: - (records, next_offset) = await self.scroll( - from_collection_name, offset=next_offset, limit=batch_size, with_vectors=True - ) - self.upload_records(collection_name, records) self._save() return True @@ -963,27 +750,29 @@ async def recreate_collection( self, collection_name: str, vectors_config: Union[types.VectorParams, Mapping[str, types.VectorParams]], - init_from: Optional[types.InitFrom] = None, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: await self.delete_collection(collection_name) return await self.create_collection( - collection_name, vectors_config, init_from, sparse_vectors_config + collection_name, vectors_config, sparse_vectors_config, metadata=metadata ) def upload_points( - self, collection_name: str, points: Iterable[types.PointStruct], **kwargs: Any - ) -> None: - self._upload_points(collection_name, points) - - def upload_records( - self, collection_name: str, records: Iterable[types.Record], **kwargs: Any + self, + collection_name: str, + points: Iterable[types.PointStruct], + update_filter: Optional[types.Filter] = None, + **kwargs: Any, ) -> None: - self._upload_points(collection_name, records) + self._upload_points(collection_name, points, update_filter=update_filter) def _upload_points( - self, collection_name: str, points: Iterable[Union[types.PointStruct, types.Record]] + self, + collection_name: str, + points: Iterable[Union[types.PointStruct, types.Record]], + update_filter: Optional[types.Filter] = None, ) -> None: collection = self._get_collection(collection_name) collection.upsert( @@ -992,7 +781,8 @@ def _upload_points( id=point.id, vector=point.vector or {}, payload=point.payload or {} ) for point in points - ] + ], + update_filter=update_filter, ) def upload_collection( @@ -1003,6 +793,7 @@ def upload_collection( ], payload: Optional[Iterable[dict[Any, Any]]] = None, ids: Optional[Iterable[types.PointId]] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> None: def uuid_generator() -> Generator[str, None, None]: @@ -1024,14 +815,15 
@@ def uuid_generator() -> Generator[str, None, None]: collection.upsert( [ rest_models.PointStruct( - id=point_id, + id=str(point_id) if isinstance(point_id, uuid.UUID) else point_id, vector=(vector.tolist() if isinstance(vector, np.ndarray) else vector) or {}, payload=payload or {}, ) for (point_id, vector, payload) in zip( ids or uuid_generator(), iter(vectors), payload or itertools.cycle([{}]) ) - ] + ], + update_filter=update_filter, ) async def create_payload_index( @@ -1124,19 +916,6 @@ async def recover_shard_snapshot( "Snapshots are not supported in the local Qdrant. Please use server Qdrant if you need snapshots." ) - async def lock_storage(self, reason: str, **kwargs: Any) -> types.LocksOption: - raise NotImplementedError( - "Locks are not supported in the local Qdrant. Please use server Qdrant if you need full snapshots." - ) - - async def unlock_storage(self, **kwargs: Any) -> types.LocksOption: - raise NotImplementedError( - "Locks are not supported in the local Qdrant. Please use server Qdrant if you need full snapshots." - ) - - async def get_locks(self, **kwargs: Any) -> types.LocksOption: - return types.LocksOption(error_message=None, write=False) - async def create_shard_key( self, collection_name: str, diff --git a/qdrant_client/local/local_collection.py b/qdrant_client/local/local_collection.py index f3677a54..bc2392b4 100644 --- a/qdrant_client/local/local_collection.py +++ b/qdrant_client/local/local_collection.py @@ -56,7 +56,7 @@ ) from qdrant_client.local.json_path_parser import JsonPathItem, parse_json_path from qdrant_client.local.order_by import to_order_value -from qdrant_client.local.payload_filters import calculate_payload_mask +from qdrant_client.local.payload_filters import calculate_payload_mask, check_filter from qdrant_client.local.payload_value_extractor import value_by_key, parse_uuid from qdrant_client.local.payload_value_setter import set_value_by_key from qdrant_client.local.persistence import CollectionPersistence @@ -809,16 +809,23 @@ def _merge_sources( with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, with_vectors: Union[bool, Sequence[str]] = False, ) -> list[types.ScoredPoint]: - if isinstance(query, models.FusionQuery): + if isinstance(query, (models.FusionQuery, models.RrfQuery)): # Fuse results - if query.fusion == models.Fusion.RRF: - # RRF: Reciprocal Rank Fusion - fused = reciprocal_rank_fusion(responses=sources, limit=limit + offset) - elif query.fusion == models.Fusion.DBSF: - # DBSF: Distribution-Based Score Fusion - fused = distribution_based_score_fusion(responses=sources, limit=limit + offset) + if isinstance(query, models.RrfQuery): + fused = reciprocal_rank_fusion( + responses=sources, limit=limit + offset, ranking_constant_k=query.rrf.k + ) else: - raise ValueError(f"Fusion method {query.fusion} does not exist") + if query.fusion == models.Fusion.RRF: + # RRF: Reciprocal Rank Fusion + fused = reciprocal_rank_fusion(responses=sources, limit=limit + offset) + elif query.fusion == models.Fusion.DBSF: + # DBSF: Distribution-Based Score Fusion + fused = distribution_based_score_fusion( + responses=sources, limit=limit + offset + ) + else: + raise ValueError(f"Fusion method {query.fusion} does not exist") # Fetch payload and vectors ids = [point.id for point in fused] @@ -971,9 +978,11 @@ def _query_collection( else: raise ValueError(f"Unknown Sample variant: {query.sample}") elif isinstance(query, models.FusionQuery): - raise AssertionError("Cannot perform fusion without prefetches") + raise ValueError("Cannot 
perform fusion without prefetches") elif isinstance(query, models.FormulaQuery): - raise AssertionError("Cannot perform formula without prefetches") + raise ValueError("Cannot perform formula without prefetches") + elif isinstance(query, models.RrfQuery): + raise ValueError("Cannot perform RRF query without prefetches") else: # most likely a VectorInput, delegate to search return self.search( @@ -1217,7 +1226,7 @@ def retrieve( with_vectors: Union[bool, Sequence[str]] = False, ) -> list[models.Record]: result = [] - + ids = [str(id_) if isinstance(id_, uuid.UUID) else id_ for id_ in ids] for point_id in ids: if point_id not in self.ids: continue @@ -1663,12 +1672,6 @@ def _search_distance_matrix( def _preprocess_target( target: Optional[models.VectorInput], collection: "LocalCollection", vector_name: str ) -> tuple[models.Vector, Optional[types.PointId]]: - # todo: context can no longer be grpc.TargetVector, but models.VectorInput, currently, grpc types are not supported - target = ( - GrpcToRest.convert_target_vector(target) - if target is not None and isinstance(target, grpc.TargetVector) - else target - ) if isinstance(target, get_args(types.PointId)): if target not in collection.ids: raise ValueError(f"Point {target} is not found in the collection") @@ -1690,15 +1693,6 @@ def _preprocess_context( ) -> tuple[ list[ContextPair], list[SparseContextPair], list[MultiContextPair], list[types.PointId] ]: - # todo: context can no longer be ContextExamplePair, currently grpc types are not supported - context = [ - ( - GrpcToRest.convert_context_example_pair(pair) - if isinstance(pair, grpc.ContextExamplePair) - else pair - ) - for pair in context - ] mentioned_ids = [] dense_context_vectors = [] sparse_context_vectors = [] @@ -2269,6 +2263,8 @@ def _rescore_with_formula( return rescored[:limit] def _update_point(self, point: models.PointStruct) -> None: + if isinstance(point.id, uuid.UUID): + point.id = str(point.id) idx = self.ids[point.id] self.payload[idx] = deepcopy( to_jsonable_python(point.payload) if point.payload is not None else {} @@ -2327,6 +2323,9 @@ def _update_point(self, point: models.PointStruct) -> None: def _add_point(self, point: models.PointStruct) -> None: idx = len(self.ids) + if isinstance(point.id, uuid.UUID): + point.id = str(point.id) + self.ids[point.id] = idx self.ids_inv.append(point.id) @@ -2421,7 +2420,11 @@ def _add_point(self, point: models.PointStruct) -> None: self.multivectors[vector_name] = named_vectors - def _upsert_point(self, point: models.PointStruct) -> None: + def _upsert_point( + self, + point: models.PointStruct, + update_filter: Optional[types.Filter] = None, + ) -> None: if isinstance(point.id, str): # try to parse as UUID try: @@ -2454,7 +2457,20 @@ def _upsert_point(self, point: models.PointStruct) -> None: if not self.vectors and not self.multivectors: raise ValueError("Wrong input: Not existing vector name error") + if isinstance(point.id, uuid.UUID): + point.id = str(point.id) + if point.id in self.ids: + idx = self.ids[point.id] + if not self.deleted[idx] and update_filter is not None: + has_vector = {} + for vector_name, deleted in self.deleted_per_vector.items(): + if not deleted[idx]: + has_vector[vector_name] = True + if not check_filter( + update_filter, self.payload[idx], self.ids_inv[idx], has_vector + ): + return None self._update_point(point) else: self._add_point(point) @@ -2462,10 +2478,14 @@ def _upsert_point(self, point: models.PointStruct) -> None: if self.storage is not None: self.storage.persist(point) - def upsert(self, 
points: Union[Sequence[models.PointStruct], models.Batch]) -> None: + def upsert( + self, + points: Union[Sequence[models.PointStruct], models.Batch], + update_filter: Optional[types.Filter] = None, + ) -> None: if isinstance(points, list): for point in points: - self._upsert_point(point) + self._upsert_point(point, update_filter=update_filter) elif isinstance(points, models.Batch): batch = points if isinstance(batch.vectors, list): @@ -2485,7 +2505,8 @@ def upsert(self, points: Union[Sequence[models.PointStruct], models.Batch]) -> N id=point_id, payload=payload, vector=vector, - ) + ), + update_filter=update_filter, ) else: raise ValueError(f"Unsupported type: {type(points)}") @@ -2532,15 +2553,27 @@ def _update_named_vectors( vector_np /= np.where(vector_norm != 0.0, vector_norm, EPSILON) self.multivectors[vector_name][idx] = vector_np - def update_vectors(self, points: Sequence[types.PointVectors]) -> None: + def update_vectors( + self, points: Sequence[types.PointVectors], update_filter: Optional[types.Filter] = None + ) -> None: for point in points: - point_id = point.id + point_id = str(point.id) if isinstance(point.id, uuid.UUID) else point.id idx = self.ids[point_id] vector_struct = point.vector if isinstance(vector_struct, list): fixed_vectors = {DEFAULT_VECTOR_NAME: vector_struct} else: fixed_vectors = vector_struct + + if not self.deleted[idx] and update_filter is not None: + has_vector = {} + for vector_name, deleted in self.deleted_per_vector.items(): + if not deleted[idx]: + has_vector[vector_name] = True + if not check_filter( + update_filter, self.payload[idx], self.ids_inv[idx], has_vector + ): + return None self._update_named_vectors(idx, fixed_vectors) self._persist_by_id(point_id) @@ -2587,11 +2620,11 @@ def _selector_to_ids( ], ) -> list[models.ExtendedPointId]: if isinstance(selector, list): - return selector + return [str(id_) if isinstance(id_, uuid.UUID) else id_ for id_ in selector] elif isinstance(selector, models.Filter): return self._filter_to_ids(selector) elif isinstance(selector, models.PointIdsList): - return selector.points + return [str(id_) if isinstance(id_, uuid.UUID) else id_ for id_ in selector.points] elif isinstance(selector, models.FilterSelector): return self._filter_to_ids(selector.filter) else: @@ -2700,10 +2733,11 @@ def batch_update_points( ) -> None: for update_op in update_operations: if isinstance(update_op, models.UpsertOperation): - if isinstance(update_op.upsert, models.PointsBatch): - self.upsert(update_op.upsert.batch) - elif isinstance(update_op.upsert, models.PointsList): - self.upsert(update_op.upsert.points) + upsert_struct = update_op.upsert + if isinstance(upsert_struct, models.PointsBatch): + self.upsert(upsert_struct.batch, update_filter=upsert_struct.update_filter) + elif isinstance(upsert_struct, models.PointsList): + self.upsert(upsert_struct.points, update_filter=upsert_struct.update_filter) else: raise ValueError(f"Unsupported upsert type: {type(update_op.upsert)}") elif isinstance(update_op, models.DeleteOperation): @@ -2726,7 +2760,10 @@ def batch_update_points( elif isinstance(update_op, models.ClearPayloadOperation): self.clear_payload(update_op.clear_payload) elif isinstance(update_op, models.UpdateVectorsOperation): - self.update_vectors(update_op.update_vectors.points) + update_vectors = update_op.update_vectors + self.update_vectors( + update_vectors.points, update_filter=update_vectors.update_filter + ) elif isinstance(update_op, models.DeleteVectorsOperation): points_selector = ( update_op.delete_vectors.points 
or update_op.delete_vectors.filter @@ -2747,7 +2784,6 @@ def info(self) -> models.CollectionInfo: return models.CollectionInfo( status=models.CollectionStatus.GREEN, optimizer_status=models.OptimizersStatusOneOf.OK, - vectors_count=None, indexed_vectors_count=0, # LocalCollection does not do indexing points_count=self.count().count, segments_count=1, @@ -2779,6 +2815,7 @@ def info(self) -> models.CollectionInfo: max_optimization_threads=1, ), quantization_config=None, + metadata=self.config.metadata, ), ) diff --git a/qdrant_client/local/multi_distances.py b/qdrant_client/local/multi_distances.py index 79935181..35e0f9f5 100644 --- a/qdrant_client/local/multi_distances.py +++ b/qdrant_client/local/multi_distances.py @@ -5,8 +5,6 @@ from qdrant_client.http import models from qdrant_client.conversions import common_types as types from qdrant_client.local.distances import ( - distance_to_order, - DistanceOrder, calculate_distance, scaled_fast_sigmoid, EPSILON, diff --git a/qdrant_client/local/payload_filters.py b/qdrant_client/local/payload_filters.py index dc911f8f..8712502d 100644 --- a/qdrant_client/local/payload_filters.py +++ b/qdrant_client/local/payload_filters.py @@ -1,5 +1,6 @@ from datetime import date, datetime, timezone from typing import Any, Optional, Union, Dict +from uuid import UUID import numpy as np @@ -152,6 +153,8 @@ def check_match(condition: models.Match, value: Any) -> bool: return value == condition.value if isinstance(condition, models.MatchText): return value is not None and condition.text in value + if isinstance(condition, models.MatchTextAny): + return value is not None and any(word in value for word in condition.text_any.split()) if isinstance(condition, models.MatchAny): return value in condition.any if isinstance(condition, models.MatchExcept): @@ -184,7 +187,8 @@ def check_condition( ): return True elif isinstance(condition, models.HasIdCondition): - if point_id in condition.has_id: + ids = [str(id_) if isinstance(id_, UUID) else id_ for id_ in condition.has_id] + if point_id in ids: return True elif isinstance(condition, models.HasVectorCondition): if condition.has_vector in has_vector and has_vector[condition.has_vector]: diff --git a/qdrant_client/local/qdrant_local.py b/qdrant_client/local/qdrant_local.py index 6b5d0b87..955d80c8 100644 --- a/qdrant_client/local/qdrant_local.py +++ b/qdrant_client/local/qdrant_local.py @@ -3,6 +3,7 @@ import json import os import shutil +import uuid from copy import deepcopy from io import TextIOWrapper from typing import ( @@ -25,7 +26,6 @@ from qdrant_client.client_base import QdrantBase from qdrant_client.conversions import common_types as types from qdrant_client.http import models as rest_models -from qdrant_client.http.models.models import RecommendExample from qdrant_client.local.local_collection import ( LocalCollection, DEFAULT_VECTOR_NAME, @@ -171,27 +171,6 @@ def _get_collection(self, collection_name: str) -> LocalCollection: return self.collections[self.aliases[collection_name]] raise ValueError(f"Collection {collection_name} not found") - def search_batch( - self, - collection_name: str, - requests: Sequence[types.SearchRequest], - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - collection = self._get_collection(collection_name) - - return [ - collection.search( - query_vector=request.vector, - query_filter=request.filter, - limit=request.limit, - offset=request.offset, - with_payload=request.with_payload, - with_vectors=request.with_vector, - score_threshold=request.score_threshold, - ) - for request 
in requests - ] - def search( self, collection_name: str, @@ -250,47 +229,6 @@ def search_matrix_pairs( query_filter=query_filter, limit=limit, sample=sample, using=using ) - def search_groups( - self, - collection_name: str, - query_vector: Union[ - types.NumpyArray, - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - ], - group_by: str, - query_filter: Optional[rest_models.Filter] = None, - search_params: Optional[rest_models.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - with_payload: Union[bool, Sequence[str], rest_models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - **kwargs: Any, - ) -> types.GroupsResult: - collection = self._get_collection(collection_name) - with_lookup_collection = None - if with_lookup is not None: - if isinstance(with_lookup, str): - with_lookup_collection = self._get_collection(with_lookup) - else: - with_lookup_collection = self._get_collection(with_lookup.collection) - - return collection.search_groups( - query_vector=query_vector, - query_filter=query_filter, - limit=limit, - group_by=group_by, - group_size=group_size, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - with_lookup=with_lookup, - with_lookup_collection=with_lookup_collection, - ) - def _resolve_query_input( self, collection_name: str, @@ -329,6 +267,8 @@ def input_into_vector( vector_input: types.VectorInput, ) -> types.VectorInput: if isinstance(vector_input, get_args(types.PointId)): + if isinstance(vector_input, uuid.UUID): + vector_input = str(vector_input) point_id = vector_input # rename for clarity if point_id not in collection.ids: raise ValueError(f"Point {point_id} is not found in the collection") @@ -387,6 +327,9 @@ def input_into_vector( pass elif isinstance(query, rest_models.FusionQuery): pass + elif isinstance(query, rest_models.RrfQuery): + pass + return query, mentioned_ids def _resolve_prefetches_input( @@ -554,179 +497,6 @@ def query_points_groups( with_lookup_collection=with_lookup_collection, ) - def recommend_batch( - self, - collection_name: str, - requests: Sequence[types.RecommendRequest], - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - collection = self._get_collection(collection_name) - - return [ - collection.recommend( - positive=request.positive, - negative=request.negative, - query_filter=request.filter, - limit=request.limit, - offset=request.offset, - with_payload=request.with_payload, - with_vectors=request.with_vector, - score_threshold=request.score_threshold, - using=request.using, - lookup_from_collection=self._get_collection(request.lookup_from.collection) - if request.lookup_from - else None, - lookup_from_vector_name=request.lookup_from.vector - if request.lookup_from - else None, - strategy=request.strategy, - ) - for request in requests - ] - - def recommend( - self, - collection_name: str, - positive: Optional[Sequence[RecommendExample]] = None, - negative: Optional[Sequence[RecommendExample]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - score_threshold: Optional[float] = None, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - strategy: Optional[types.RecommendStrategy] = 
None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - collection = self._get_collection(collection_name) - return collection.recommend( - positive=positive, - negative=negative, - query_filter=query_filter, - limit=limit, - offset=offset, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - using=using, - lookup_from_collection=self._get_collection(lookup_from.collection) - if lookup_from - else None, - lookup_from_vector_name=lookup_from.vector if lookup_from else None, - strategy=strategy, - ) - - def recommend_groups( - self, - collection_name: str, - group_by: str, - positive: Optional[Sequence[Union[types.PointId, list[float]]]] = None, - negative: Optional[Sequence[Union[types.PointId, list[float]]]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - score_threshold: Optional[float] = None, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - strategy: Optional[types.RecommendStrategy] = None, - **kwargs: Any, - ) -> types.GroupsResult: - collection = self._get_collection(collection_name) - with_lookup_collection = None - if with_lookup is not None: - if isinstance(with_lookup, str): - with_lookup_collection = self._get_collection(with_lookup) - else: - with_lookup_collection = self._get_collection(with_lookup.collection) - - return collection.recommend_groups( - positive=positive, - negative=negative, - group_by=group_by, - group_size=group_size, - query_filter=query_filter, - limit=limit, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - using=using, - lookup_from_collection=self._get_collection(lookup_from.collection) - if lookup_from - else None, - lookup_from_vector_name=lookup_from.vector if lookup_from else None, - with_lookup=with_lookup, - with_lookup_collection=with_lookup_collection, - strategy=strategy, - ) - - def discover( - self, - collection_name: str, - target: Optional[types.TargetVector] = None, - context: Optional[Sequence[types.ContextExamplePair]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - collection = self._get_collection(collection_name) - return collection.discover( - target=target, - context=context, - query_filter=query_filter, - limit=limit, - offset=offset, - with_payload=with_payload, - with_vectors=with_vectors, - using=using, - lookup_from_collection=self._get_collection(lookup_from.collection) - if lookup_from - else None, - lookup_from_vector_name=lookup_from.vector if lookup_from else None, - ) - - def discover_batch( - self, - collection_name: str, - requests: Sequence[types.DiscoverRequest], - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - collection = self._get_collection(collection_name) - - return [ - collection.discover( - target=request.target, - context=request.context, - 
query_filter=request.filter, - limit=request.limit, - offset=request.offset, - with_payload=request.with_payload, - with_vectors=request.with_vector, - using=request.using, - lookup_from_collection=self._get_collection(request.lookup_from.collection) - if request.lookup_from - else None, - lookup_from_vector_name=request.lookup_from.vector - if request.lookup_from - else None, - ) - for request in requests - ] - def scroll( self, collection_name: str, @@ -771,20 +541,25 @@ def facet( return collection.facet(key=key, facet_filter=facet_filter, limit=limit) def upsert( - self, collection_name: str, points: types.Points, **kwargs: Any + self, + collection_name: str, + points: types.Points, + update_filter: Optional[types.Filter] = None, + **kwargs: Any, ) -> types.UpdateResult: collection = self._get_collection(collection_name) - collection.upsert(points) + collection.upsert(points, update_filter=update_filter) return self._default_update_result() def update_vectors( self, collection_name: str, points: Sequence[types.PointVectors], + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> types.UpdateResult: collection = self._get_collection(collection_name) - collection.update_vectors(points) + collection.update_vectors(points, update_filter=update_filter) return self._default_update_result() def delete_vectors( @@ -951,16 +726,25 @@ def update_collection( self, collection_name: str, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: _collection = self._get_collection(collection_name) - + updated = False if sparse_vectors_config is not None: for vector_name, vector_params in sparse_vectors_config.items(): _collection.update_sparse_vectors_config(vector_name, vector_params) + updated = True - return True - return False + if metadata is not None: + if _collection.config.metadata is not None: + _collection.config.metadata.update(metadata) + else: + _collection.config.metadata = deepcopy(metadata) + updated = True + + self._save() + return updated def _collection_path(self, collection_name: str) -> Optional[str]: if self.persistent: @@ -991,21 +775,13 @@ def create_collection( vectors_config: Optional[ Union[types.VectorParams, Mapping[str, types.VectorParams]] ] = None, - init_from: Optional[types.InitFrom] = None, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: if self.closed: raise RuntimeError("QdrantLocal instance is closed. 
Please create a new instance.") - src_collection = None - from_collection_name = None - if init_from is not None: - from_collection_name = ( - init_from if isinstance(init_from, str) else init_from.collection - ) - src_collection = self._get_collection(from_collection_name) - if collection_name in self.collections: raise ValueError(f"Collection {collection_name} already exists") collection_path = self._collection_path(collection_name) @@ -1016,25 +792,13 @@ def create_collection( rest_models.CreateCollection( vectors=vectors_config or {}, sparse_vectors=sparse_vectors_config, + metadata=deepcopy(metadata), ), location=collection_path, force_disable_check_same_thread=self.force_disable_check_same_thread, ) self.collections[collection_name] = collection - if src_collection and from_collection_name: - batch_size = 100 - records, next_offset = self.scroll(from_collection_name, limit=2, with_vectors=True) - self.upload_records( - collection_name, records - ) # it is not crucial to replace upload_records here - # since it is an internal usage, and we don't have custom shard keys in qdrant local - while next_offset is not None: - records, next_offset = self.scroll( - from_collection_name, offset=next_offset, limit=batch_size, with_vectors=True - ) - self.upload_records(collection_name, records) - self._save() return True @@ -1042,30 +806,30 @@ def recreate_collection( self, collection_name: str, vectors_config: Union[types.VectorParams, Mapping[str, types.VectorParams]], - init_from: Optional[types.InitFrom] = None, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: self.delete_collection(collection_name) return self.create_collection( - collection_name, vectors_config, init_from, sparse_vectors_config + collection_name, vectors_config, sparse_vectors_config, metadata=metadata ) def upload_points( - self, collection_name: str, points: Iterable[types.PointStruct], **kwargs: Any - ) -> None: - self._upload_points(collection_name, points) - - def upload_records( - self, collection_name: str, records: Iterable[types.Record], **kwargs: Any + self, + collection_name: str, + points: Iterable[types.PointStruct], + update_filter: Optional[types.Filter] = None, + **kwargs: Any, ) -> None: - # upload_records in local mode behaves like upload_records with wait=True in server mode - self._upload_points(collection_name, records) + # upload_points in local mode behaves like upload_points with wait=True in server mode + self._upload_points(collection_name, points, update_filter=update_filter) def _upload_points( self, collection_name: str, points: Iterable[Union[types.PointStruct, types.Record]], + update_filter: Optional[types.Filter] = None, ) -> None: collection = self._get_collection(collection_name) collection.upsert( @@ -1076,7 +840,8 @@ def _upload_points( payload=point.payload or {}, ) for point in points - ] + ], + update_filter=update_filter, ) def upload_collection( @@ -1087,6 +852,7 @@ def upload_collection( ], payload: Optional[Iterable[dict[Any, Any]]] = None, ids: Optional[Iterable[types.PointId]] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> None: # upload_collection in local mode behaves like upload_collection with wait=True in server mode @@ -1110,7 +876,7 @@ def uuid_generator() -> Generator[str, None, None]: collection.upsert( [ rest_models.PointStruct( - id=point_id, + id=str(point_id) if isinstance(point_id, uuid.UUID) else point_id, vector=(vector.tolist() if 
isinstance(vector, np.ndarray) else vector) or {}, payload=payload or {}, ) @@ -1119,7 +885,8 @@ def uuid_generator() -> Generator[str, None, None]: iter(vectors), payload or itertools.cycle([{}]), ) - ] + ], + update_filter=update_filter, ) def create_payload_index( @@ -1214,22 +981,6 @@ def recover_shard_snapshot( "Snapshots are not supported in the local Qdrant. Please use server Qdrant if you need snapshots." ) - def lock_storage(self, reason: str, **kwargs: Any) -> types.LocksOption: - raise NotImplementedError( - "Locks are not supported in the local Qdrant. Please use server Qdrant if you need full snapshots." - ) - - def unlock_storage(self, **kwargs: Any) -> types.LocksOption: - raise NotImplementedError( - "Locks are not supported in the local Qdrant. Please use server Qdrant if you need full snapshots." - ) - - def get_locks(self, **kwargs: Any) -> types.LocksOption: - return types.LocksOption( - error_message=None, - write=False, - ) - def create_shard_key( self, collection_name: str, diff --git a/qdrant_client/migrate/migrate.py b/qdrant_client/migrate/migrate.py index 2ce4dd45..aabfa872 100644 --- a/qdrant_client/migrate/migrate.py +++ b/qdrant_client/migrate/migrate.py @@ -1,5 +1,5 @@ import time -from typing import Iterable, Optional, Any +from typing import Iterable, Optional from qdrant_client._pydantic_compat import to_dict, model_fields from qdrant_client.client_base import QdrantBase @@ -170,9 +170,6 @@ def _migrate_collection( """ records, next_offset = source_client.scroll(collection_name, limit=2, with_vectors=True) upload_with_retry(client=dest_client, collection_name=collection_name, points=records) # type: ignore - # upload_records has been deprecated due to the usage of models.Record; models.Record has been deprecated as a - # structure for uploading due to a `shard_key` field, and now is used only as a result structure. - # since shard_keys are not supported in migration, we can safely type ignore here and use Records for uploading while next_offset is not None: records, next_offset = source_client.scroll( collection_name, offset=next_offset, limit=batch_size, with_vectors=True diff --git a/qdrant_client/proto/collections.proto b/qdrant_client/proto/collections.proto index da376673..6aeedd1e 100644 --- a/qdrant_client/proto/collections.proto +++ b/qdrant_client/proto/collections.proto @@ -3,6 +3,8 @@ package qdrant; option csharp_namespace = "Qdrant.Client.Grpc"; +import "json_with_int.proto"; + enum Datatype { Default = 0; Float32 = 1; @@ -160,6 +162,10 @@ message OptimizerStatus { string error = 2; } +message CollectionWarning { + string message = 1; +} + message HnswConfigDiff { /* Number of edges per node in the index graph. Larger the value - more accurate the search, more space required. @@ -170,10 +176,12 @@ message HnswConfigDiff { */ optional uint64 ef_construct = 2; /* - Minimal size (in KiloBytes) of vectors for additional payload-based indexing. - If the payload chunk is smaller than `full_scan_threshold` additional indexing won't be used - - in this case full-scan search should be preferred by query planner and additional indexing is not required. - Note: 1 Kb = 1 vector of size 256 + Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search. + This measures the total size of vectors being queried against. 
+ When the maximum estimated amount of points that a condition satisfies is smaller than + `full_scan_threshold`, the query planner will use full-scan search instead of HNSW index + traversal for better performance. + Note: 1Kb = 1 vector of size 256 */ optional uint64 full_scan_threshold = 3; /* @@ -191,6 +199,13 @@ message HnswConfigDiff { Number of additional payload-aware links per node in the index graph. If not set - regular M parameter will be used. */ optional uint64 payload_m = 6; + /* + Store copies of original and quantized vectors within the HNSW index file. Default: false. + Enabling this option will trade the search speed for disk usage by reducing amount of + random seeks during the search. + Requires quantized vectors to be enabled. Multi-vectors are not supported. + */ + optional bool inline_storage = 7; } message SparseIndexConfig { @@ -212,6 +227,7 @@ message SparseIndexConfig { message WalConfigDiff { optional uint64 wal_capacity_mb = 1; // Size of a single WAL block file optional uint64 wal_segments_ahead = 2; // Number of segments to create in advance + optional uint64 wal_retain_closed = 3; // Number of closed segments to retain } message OptimizersConfigDiff { @@ -235,6 +251,8 @@ message OptimizersConfigDiff { */ optional uint64 default_segment_number = 3; /* + Deprecated: + Do not create segments larger this size (in kilobytes). Large segments might require disproportionately long indexation times, therefore it makes sense to limit the size of segments. @@ -348,25 +366,25 @@ enum ShardingMethod { } message StrictModeConfig { - optional bool enabled = 1; - optional uint32 max_query_limit = 2; - optional uint32 max_timeout = 3; - optional bool unindexed_filtering_retrieve = 4; - optional bool unindexed_filtering_update = 5; - - optional uint32 search_max_hnsw_ef = 6; - optional bool search_allow_exact = 7; - optional float search_max_oversampling = 8; - optional uint64 upsert_max_batchsize = 9; - optional uint64 max_collection_vector_size_bytes = 10; + optional bool enabled = 1; // Whether strict mode is enabled for a collection or not. + optional uint32 max_query_limit = 2; // Max allowed `limit` parameter for all APIs that don't have their own max limit. + optional uint32 max_timeout = 3; // Max allowed `timeout` parameter. + optional bool unindexed_filtering_retrieve = 4; // Allow usage of unindexed fields in retrieval based (e.g. search) filters. + optional bool unindexed_filtering_update = 5; // Allow usage of unindexed fields in filtered updates (e.g. delete by payload). + optional uint32 search_max_hnsw_ef = 6; // Max HNSW ef value allowed in search parameters. + optional bool search_allow_exact = 7; // Whether exact search is allowed. + optional float search_max_oversampling = 8; // Max oversampling value allowed in search + optional uint64 upsert_max_batchsize = 9; // Max batchsize when upserting + optional uint64 max_collection_vector_size_bytes = 10; // Max size of a collections vector storage in bytes, ignoring replicas. 
optional uint32 read_rate_limit = 11; // Max number of read operations per minute per replica optional uint32 write_rate_limit = 12; // Max number of write operations per minute per replica - optional uint64 max_collection_payload_size_bytes = 13; - optional uint64 filter_max_conditions = 14; - optional uint64 condition_max_size = 15; - optional StrictModeMultivectorConfig multivector_config = 16; - optional StrictModeSparseConfig sparse_config = 17; - optional uint64 max_points_count = 18; + optional uint64 max_collection_payload_size_bytes = 13; // Max size of a collections payload storage in bytes, ignoring replicas. + optional uint64 filter_max_conditions = 14; // Max conditions a filter can have. + optional uint64 condition_max_size = 15; // Max size of a condition, eg. items in `MatchAny`. + optional StrictModeMultivectorConfig multivector_config = 16; // Multivector strict mode configuration + optional StrictModeSparseConfig sparse_config = 17; // Sparse vector strict mode configuration + optional uint64 max_points_count = 18; // Max number of points estimated in a collection + optional uint64 max_payload_index_count = 19; // Max number of payload indexes in a collection } message StrictModeSparseConfig { @@ -374,7 +392,7 @@ message StrictModeSparseConfig { } message StrictModeSparse { - optional uint64 max_length = 10; + optional uint64 max_length = 10; // Max length of sparse vector } message StrictModeMultivectorConfig { @@ -382,7 +400,7 @@ message StrictModeMultivectorConfig { } message StrictModeMultivector { - optional uint64 max_vectors = 1; + optional uint64 max_vectors = 1; // Max number of vectors in a multivector } message CreateCollection { @@ -398,11 +416,12 @@ message CreateCollection { optional VectorsConfig vectors_config = 10; // Configuration for vectors optional uint32 replication_factor = 11; // Number of replicas of each shard that network tries to maintain, default = 1 optional uint32 write_consistency_factor = 12; // How many replicas should apply the operation for us to consider it successful, default = 1 - optional string init_from_collection = 13; // Specify name of the other collection to copy data from + reserved 13; // Deprecated: init_from optional QuantizationConfig quantization_config = 14; // Quantization configuration of vector optional ShardingMethod sharding_method = 15; // Sharding method optional SparseVectorConfig sparse_vectors_config = 16; // Configuration for sparse vectors optional StrictModeConfig strict_mode_config = 17; // Configuration for strict mode + map metadata = 18; // Arbitrary JSON metadata for the collection } message UpdateCollection { @@ -415,6 +434,7 @@ message UpdateCollection { optional QuantizationConfigDiff quantization_config = 7; // Quantization configuration of vector optional SparseVectorConfig sparse_vectors_config = 8; // New sparse vector parameters optional StrictModeConfig strict_mode_config = 9; // New strict mode configuration + map metadata = 10; // Arbitrary JSON-like metadata for the collection, will be merged with already stored metadata } message DeleteCollection { @@ -454,6 +474,7 @@ message CollectionConfig { WalConfigDiff wal_config = 4; // Configuration of the Write-Ahead-Log optional QuantizationConfig quantization_config = 5; // Configuration of the vector quantization optional StrictModeConfig strict_mode_config = 6; // Configuration of strict mode. 
+ map metadata = 7; // Arbitrary JSON metadata for the collection } enum TokenizerType { @@ -499,6 +520,7 @@ message TextIndexParams { optional StopwordsSet stopwords = 6; // Stopwords for the text index optional bool phrase_matching = 7; // If true - support phrase matching. optional StemmingAlgorithm stemmer = 8; // Set an algorithm for stemming. + optional bool ascii_folding = 9; // If true, normalize tokens by folding accented characters to ASCII (e.g., "ação" -> "acao"). Default: false. } message StemmingAlgorithm { @@ -547,7 +569,7 @@ message PayloadSchemaInfo { message CollectionInfo { CollectionStatus status = 1; // operating condition of the collection OptimizerStatus optimizer_status = 2; // status of collection optimizers - optional uint64 vectors_count = 3; // Approximate number of vectors in the collection + reserved 3; // Deprecated uint64 segments_count = 4; // Number of independent segments reserved 5; // Deprecated reserved 6; // Deprecated @@ -555,6 +577,7 @@ message CollectionInfo { map payload_schema = 8; // Collection data types optional uint64 points_count = 9; // Approximate number of points in the collection optional uint64 indexed_vectors_count = 10; // Approximate number of indexed vectors in the collection. + repeated CollectionWarning warnings = 11; // Warnings related to the collection } message ChangeAliases { @@ -614,6 +637,7 @@ enum ReplicaState { Recovery = 6; // Shard is undergoing recovered by an external node; Normally rejects updates, accepts updates if force is true Resharding = 7; // Points are being migrated to this shard as part of scale-up resharding ReshardingScaleDown = 8; // Points are being migrated to this shard as part of scale-down resharding + ActiveRead = 9; // Active for readers, Partial for writers } message ShardKey { @@ -717,6 +741,7 @@ message CreateShardKey { optional uint32 shards_number = 2; // Number of shards to create per shard key optional uint32 replication_factor = 3; // Number of replicas of each shard to create repeated uint64 placement = 4; // List of peer ids, allowed to create shards. If empty - all peers are allowed + optional ReplicaState initial_state = 5; // Initial state of created replicas. Warning: use with care. } message DeleteShardKey { diff --git a/qdrant_client/proto/points.proto b/qdrant_client/proto/points.proto index 921e85a9..74e18ab0 100644 --- a/qdrant_client/proto/points.proto +++ b/qdrant_client/proto/points.proto @@ -64,11 +64,10 @@ message InferenceObject { map options = 3; // Model options } -// Legacy vector format, which determines the vector type by the configuration of its fields. 
message Vector { - repeated float data = 1; // Vector data (flatten for multi vectors), deprecated - optional SparseIndices indices = 2; // Sparse indices for sparse vectors, deprecated - optional uint32 vectors_count = 3; // Number of vectors per multi vector, deprecated + repeated float data = 1 [deprecated=true]; // Vector data (flatten for multi vectors), deprecated + optional SparseIndices indices = 2 [deprecated=true]; // Sparse indices for sparse vectors, deprecated + optional uint32 vectors_count = 3 [deprecated=true]; // Number of vectors per multi vector, deprecated oneof vector { DenseVector dense = 101; // Dense vector SparseVector sparse = 102; // Sparse vector @@ -80,9 +79,9 @@ message Vector { } message VectorOutput { - repeated float data = 1; // Vector data (flatten for multi vectors), deprecated - optional SparseIndices indices = 2; // Sparse indices for sparse vectors, deprecated - optional uint32 vectors_count = 3; // Number of vectors per multi vector, deprecated + repeated float data = 1 [deprecated=true]; // Vector data (flatten for multi vectors), deprecated + optional SparseIndices indices = 2 [deprecated=true]; // Sparse indices for sparse vectors, deprecated + optional uint32 vectors_count = 3 [deprecated=true]; // Number of vectors per multi vector, deprecated oneof vector { DenseVector dense = 101; // Dense vector SparseVector sparse = 102; // Sparse vector @@ -122,6 +121,7 @@ message VectorInput { message ShardKeySelector { repeated ShardKey shard_keys = 1; // List of shard keys which should be used in the request + optional ShardKey fallback = 2; } @@ -135,6 +135,7 @@ message UpsertPoints { repeated PointStruct points = 3; optional WriteOrdering ordering = 4; // Write ordering guarantees optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys + optional Filter update_filter = 6; // If specified, only points that match this filter will be updated, others will be inserted } message DeletePoints { @@ -162,6 +163,7 @@ message UpdatePointVectors { repeated PointVectors points = 3; // List of points and vectors to update optional WriteOrdering ordering = 4; // Write ordering guarantees optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys + optional Filter update_filter = 6; // If specified, only points that match this filter will be updated } message PointVectors { @@ -306,6 +308,28 @@ message QuantizationSearchParams { optional double oversampling = 3; } +message AcornSearchParams { + /* + If true, then ACORN may be used for the HNSW search based on filters + selectivity. + + Improves search recall for searches with multiple low-selectivity + payload filters, at cost of performance. + */ + optional bool enable = 1; + + /* + Maximum selectivity of filters to enable ACORN. + + If estimated filters selectivity is higher than this value, + ACORN will not be used. Selectivity is estimated as: + `estimated number of points satisfying the filters / total number of points`. + + 0.0 for never, 1.0 for always. Default is 0.4. + */ + optional double max_selectivity = 2; +} + message SearchParams { /* Params relevant to HNSW index. Size of the beam in a beam-search. 
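The next hunk exposes the AcornSearchParams message above to searches by adding an `acorn` field to SearchParams. As a minimal client-side sketch, assuming the generated REST models mirror this proto (a models.AcornSearchParams with `enable` and `max_selectivity`, reachable via models.SearchParams(acorn=...)) and a running server with a 4-dimensional collection named "demo":

from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")

hits = client.query_points(
    collection_name="demo",
    query=[0.2, 0.1, 0.9, 0.7],
    # Several low-selectivity payload conditions: the case ACORN is meant for.
    query_filter=models.Filter(
        must=[
            models.FieldCondition(key="color", match=models.MatchValue(value="red")),
            models.FieldCondition(key="size", match=models.MatchValue(value="xl")),
        ]
    ),
    search_params=models.SearchParams(
        # Assumed mirror of AcornSearchParams: ACORN is applied only while the
        # estimated filter selectivity is at or below max_selectivity (0.4 by
        # default), trading some search speed for recall.
        acorn=models.AcornSearchParams(enable=True, max_selectivity=0.4),
    ),
    limit=5,
)

Local mode would presumably ignore these parameters: LocalCollection performs no HNSW indexing at all, as the `indexed_vectors_count=0` comment earlier in this diff notes.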
@@ -328,6 +352,11 @@ message SearchParams { guarantee that all uploaded vectors will be included in search results */ optional bool indexed_only = 4; + + /* + ACORN search params + */ + optional AcornSearchParams acorn = 5; } message SearchPoints { @@ -563,7 +592,7 @@ message ContextInput { } enum Fusion { - RRF = 0; // Reciprocal Rank Fusion + RRF = 0; // Reciprocal Rank Fusion (with default parameters) DBSF = 1; // Distribution-Based Score Fusion } @@ -636,7 +665,7 @@ message DecayParamsExpression { optional Expression target = 2; // The scale factor of the decay, in terms of `x`. Defaults to 1.0. Must be a non-zero positive number. optional float scale = 3; - // The midpoint of the decay. Defaults to 0.5. Output will be this value when `|x - target| == scale`. + // The midpoint of the decay. Should be between 0 and 1. Defaults to 0.5. Output will be this value when `|x - target| == scale`. optional float midpoint = 4; } @@ -667,6 +696,11 @@ message Mmr { optional uint32 candidates_limit = 3; } +// Parameterized reciprocal rank fusion +message Rrf { + optional uint32 k = 1; // K parameter for reciprocal rank fusion +} + message Query { oneof variant { VectorInput nearest = 1; // Find the nearest neighbors to this vector. @@ -678,6 +712,7 @@ message Query { Sample sample = 7; // Sample points from the collection. Formula formula = 8; // Score boosting via an arbitrary formula NearestInputWithMmr nearest_with_mmr = 9; // Search nearest neighbors, but re-rank based on the Maximal Marginal Relevance algorithm. + Rrf rrf = 10; // Parameterized reciprocal rank fusion } } @@ -794,6 +829,7 @@ message PointsUpdateOperation { message PointStructList { repeated PointStruct points = 1; optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys + optional Filter update_filter = 3; // If specified, only points that match this filter will be updated, others will be inserted } message SetPayload { map payload = 1; @@ -815,6 +851,7 @@ message PointsUpdateOperation { message UpdateVectors { repeated PointVectors points = 1; // List of points and vectors to update optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys + optional Filter update_filter = 3; // If specified, only points that match this filter will be updated } message DeleteVectors { PointsSelector points_selector = 1; // Affected points @@ -1023,6 +1060,7 @@ message UpdateBatchResponse { message FacetResponse { repeated FacetHit hits = 1; double time = 2; // Time spent to process + optional Usage usage = 3; } message SearchMatrixPairsResponse { @@ -1110,6 +1148,7 @@ message Match { RepeatedIntegers except_integers = 7; // Match any other value except those integers RepeatedStrings except_keywords = 8; // Match any other value except those keywords string phrase = 9; // Match phrase text + string text_any = 10; // Match any word in the text } } diff --git a/qdrant_client/qdrant_client.py b/qdrant_client/qdrant_client.py index ae73d696..114e5d99 100644 --- a/qdrant_client/qdrant_client.py +++ b/qdrant_client/qdrant_client.py @@ -205,23 +205,6 @@ def grpc_points(self) -> grpc.PointsStub: raise NotImplementedError(f"gRPC client is not supported for {type(self._client)}") - @property - def rest(self) -> SyncApis[ApiClient]: - """REST Client - - Returns: - An instance of raw REST API client, generated from OpenAPI schema - """ - warnings.warn( - "The 'rest' property is deprecated and will be removed in a future version. 
Use `http` instead.", - DeprecationWarning, - stacklevel=2, - ) - if isinstance(self._client, QdrantRemote): - return self._client.rest - - raise NotImplementedError(f"REST client is not supported for {type(self._client)}") - @property def http(self) -> SyncApis[ApiClient]: """REST Client @@ -243,163 +226,6 @@ def init_options(self) -> dict[str, Any]: """ return self._init_options - def search_batch( - self, - collection_name: str, - requests: Sequence[types.SearchRequest], - timeout: Optional[int] = None, - consistency: Optional[types.ReadConsistency] = None, - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - """Perform multiple searches in a collection mitigating network overhead - - Args: - collection_name: Name of the collection - requests: List of search requests - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - timeout: - Overrides global timeout for this search. Unit is seconds. - - Returns: - List of search responses - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`search_batch` method is deprecated and will be removed in the future." - " Use `query_batch_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self._client.search_batch( - collection_name=collection_name, - requests=requests, - consistency=consistency, - timeout=timeout, - **kwargs, - ) - - def search( - self, - collection_name: str, - query_vector: Union[ - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - types.NumpyArray, - ], - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: Optional[int] = None, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - append_payload: bool = True, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - """Search for closest vectors in collection taking into account filtering conditions - - Args: - collection_name: Collection to search in - query_vector: - Search for vectors closest to this. - Can be either a vector itself, or a named vector, or a named sparse vector, or a tuple of vector name and vector itself - query_filter: - - Exclude vectors which doesn't fit given conditions. - - If `None` - search among all vectors - search_params: Additional search params - limit: How many results return - offset: - Offset of the first result to return. - May be used to paginate results. - Note: large offset values may cause performance issues. - with_payload: - - Specify which stored payload should be attached to the result. - - If `True` - attach all payload - - If `False` - do not attach any payload - - If List of string - include only specified fields - - If `PayloadSelector` - use explicit rules - with_vectors: - - If `True` - Attach stored vector to the search result. - - If `False` - Do not attach vector. 
- - If List of string - include only specified fields - - Default: `False` - score_threshold: - Define a minimal score threshold for the result. - If defined, less similar results will not be returned. - Score of the returned result might be higher or smaller than the threshold depending - on the Distance function used. - E.g. for cosine similarity only higher scores will be returned. - append_payload: Same as `with_payload`. Deprecated. - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - shard_key_selector: - This parameter allows to specify which shards should be queried. - If `None` - query all shards. Only works for collections with `custom` sharding method. - timeout: - Overrides global timeout for this search. Unit is seconds. - - Examples: - - `Search with filter`:: - - qdrant.search( - collection_name="test_collection", - query_vector=[1.0, 0.1, 0.2, 0.7], - query_filter=Filter( - must=[ - FieldCondition( - key='color', - range=Match( - value="red" - ) - ) - ] - ) - ) - - Returns: - List of found close points with similarity scores. - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`search` method is deprecated and will be removed in the future." - " Use `query_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self._client.search( - collection_name=collection_name, - query_vector=query_vector, - query_filter=query_filter, - search_params=search_params, - limit=limit, - offset=offset, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - append_payload=append_payload, - consistency=consistency, - shard_key_selector=shard_key_selector, - timeout=timeout, - **kwargs, - ) - def query_batch_points( self, collection_name: str, @@ -764,293 +590,25 @@ def query_points_groups( self._embed_models( prefetch, is_query=True, batch_size=self.local_inference_batch_size ) - ) - ) - - return self._client.query_points_groups( - collection_name=collection_name, - query=query, - prefetch=prefetch, - query_filter=query_filter, - search_params=search_params, - group_by=group_by, - limit=limit, - group_size=group_size, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - using=using, - with_lookup=with_lookup, - consistency=consistency, - shard_key_selector=shard_key_selector, - timeout=timeout, - **kwargs, - ) - - def search_groups( - self, - collection_name: str, - query_vector: Union[ - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - types.NumpyArray, - ], - group_by: str, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, 
- ) -> types.GroupsResult: - """Search for closest vectors grouped by payload field. - - Searches best matches for query vector grouped by the value of payload field. - Useful to obtain most relevant results for each category, deduplicate results, - finding the best representation vector for the same entity. - - Args: - collection_name: Collection to search in - query_vector: - Search for vectors closest to this. - Can be either a vector itself, or a named vector, or a named sparse vector, or a tuple of vector name and vector itself - group_by: Name of the payload field to group by. - Field must be of type "keyword" or "integer". - Nested fields are specified using dot notation, e.g. "nested_field.subfield". - query_filter: - - Exclude vectors which doesn't fit given conditions. - - If `None` - search among all vectors - search_params: Additional search params - limit: How many groups return - group_size: How many results return for each group - with_payload: - - Specify which stored payload should be attached to the result. - - If `True` - attach all payload - - If `False` - do not attach any payload - - If List of string - include only specified fields - - If `PayloadSelector` - use explicit rules - with_vectors: - - If `True` - Attach stored vector to the search result. - - If `False` - Do not attach vector. - - If List of string - include only specified fields - - Default: `False` - score_threshold: Minimal score threshold for the result. - If defined, less similar results will not be returned. - Score of the returned result might be higher or smaller than the threshold depending - on the Distance function used. - E.g. for cosine similarity only higher scores will be returned. - with_lookup: - Look for points in another collection using the group ids. - If specified, each group will contain a record from the specified collection - with the same id as the group id. In addition, the parameter allows to specify - which parts of the record should be returned, like in `with_payload` and `with_vectors` parameters. - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. - Values: - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - shard_key_selector: - This parameter allows to specify which shards should be queried. - If `None` - query all shards. Only works for collections with `custom` sharding method. - timeout: - Overrides global timeout for this search. Unit is seconds. - - Returns: - List of groups with not more than `group_size` hits in each group. - Each group also contains an id of the group, which is the value of the payload field. - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`search_groups` method is deprecated and will be removed in the future." 
- " Use `query_points_groups` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self._client.search_groups( - collection_name=collection_name, - query_vector=query_vector, - group_by=group_by, - query_filter=query_filter, - search_params=search_params, - limit=limit, - group_size=group_size, - with_payload=with_payload, - with_vectors=with_vectors, - score_threshold=score_threshold, - with_lookup=with_lookup, - consistency=consistency, - shard_key_selector=shard_key_selector, - timeout=timeout, - **kwargs, - ) - - def recommend_batch( - self, - collection_name: str, - requests: Sequence[types.RecommendRequest], - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - """Perform multiple recommend requests in batch mode - - Args: - collection_name: Name of the collection - requests: List of recommend requests - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - timeout: - Overrides global timeout for this search. Unit is seconds. - - Returns: - List of recommend responses - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`recommend_batch` method is deprecated and will be removed in the future." - " Use `query_batch_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self._client.recommend_batch( - collection_name=collection_name, - requests=requests, - consistency=consistency, - timeout=timeout, - **kwargs, - ) - - def recommend( - self, - collection_name: str, - positive: Optional[Sequence[types.RecommendExample]] = None, - negative: Optional[Sequence[types.RecommendExample]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - score_threshold: Optional[float] = None, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - strategy: Optional[types.RecommendStrategy] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - """Recommend points: search for similar points based on already stored in Qdrant examples. - - Provide IDs of the stored points, and Qdrant will perform search based on already existing vectors. - This functionality is especially useful for recommendation over existing collection of points. - - Args: - collection_name: Collection to search in - positive: - List of stored point IDs or vectors, which should be used as reference for similarity search. - If there is only one example - this request is equivalent to the regular search with vector of that - point. - If there are more than one example, Qdrant will attempt to search for similar to all of them. - Recommendation for multiple vectors is experimental. - Its behaviour may change depending on selected strategy. 
- negative: - List of stored point IDs or vectors, which should be dissimilar to the search result. - Negative examples is an experimental functionality. - Its behaviour may change depending on selected strategy. - query_filter: - - Exclude vectors which doesn't fit given conditions. - - If `None` - search among all vectors - search_params: Additional search params - limit: How many results return - offset: - Offset of the first result to return. - May be used to paginate results. - Note: large offset values may cause performance issues. - with_payload: - - Specify which stored payload should be attached to the result. - - If `True` - attach all payload - - If `False` - do not attach any payload - - If List of string - include only specified fields - - If `PayloadSelector` - use explicit rules - with_vectors: - - If `True` - Attach stored vector to the search result. - - If `False` - Do not attach vector. - - If List of string - include only specified fields - - Default: `False` - score_threshold: - Define a minimal score threshold for the result. - If defined, less similar results will not be returned. - Score of the returned result might be higher or smaller than the threshold depending - on the Distance function used. - E.g. for cosine similarity only higher scores will be returned. - using: - Name of the vectors to use for recommendations. - If `None` - use default vectors. - lookup_from: - Defines a location (collection and vector field name), used to lookup vectors for recommendations. - If `None` - current collection will be used. - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - shard_key_selector: - This parameter allows to specify which shards should be queried. - If `None` - query all shards. Only works for collections with `custom` sharding method. - strategy: - Strategy to use for recommendation. - Strategy defines how to combine multiple examples into a recommendation query. - Possible values: - - - 'average_vector' - calculates average vector of all examples and uses it for search - - 'best_score' - finds the result which is closer to positive examples and further from negative - timeout: - Overrides global timeout for this search. Unit is seconds. + ) + ) - Returns: - List of recommended points with similarity scores. - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`recommend` method is deprecated and will be removed in the future." 
- " Use `query_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self._client.recommend( + return self._client.query_points_groups( collection_name=collection_name, - positive=positive, - negative=negative, + query=query, + prefetch=prefetch, query_filter=query_filter, search_params=search_params, + group_by=group_by, limit=limit, - offset=offset, + group_size=group_size, with_payload=with_payload, with_vectors=with_vectors, score_threshold=score_threshold, using=using, - lookup_from=lookup_from, + with_lookup=with_lookup, consistency=consistency, shard_key_selector=shard_key_selector, - strategy=strategy, timeout=timeout, **kwargs, ) @@ -1151,265 +709,6 @@ def search_matrix_offsets( **kwargs, ) - def recommend_groups( - self, - collection_name: str, - group_by: str, - positive: Optional[Sequence[types.RecommendExample]] = None, - negative: Optional[Sequence[types.RecommendExample]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - group_size: int = 1, - score_threshold: Optional[float] = None, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - strategy: Optional[types.RecommendStrategy] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.GroupsResult: - """Recommend point groups: search for similar points based on already stored in Qdrant examples - and groups by payload field. - - Recommend best matches for given stored examples grouped by the value of payload field. - Useful to obtain most relevant results for each category, deduplicate results, - finding the best representation vector for the same entity. - - Args: - collection_name: Collection to search in - positive: - List of stored point IDs or vectors, which should be used as reference for similarity search. - If there is only one example - this request is equivalent to the regular search with vector of that - point. - If there are more than one example, Qdrant will attempt to search for similar to all of them. - Recommendation for multiple vectors is experimental. - Its behaviour may change depending on selected strategy. - negative: - List of stored point IDs or vectors, which should be dissimilar to the search result. - Negative examples is an experimental functionality. - Its behaviour may change depending on selected strategy. - group_by: Name of the payload field to group by. - Field must be of type "keyword" or "integer". - Nested fields are specified using dot notation, e.g. "nested_field.subfield". - query_filter: - - Exclude vectors which doesn't fit given conditions. - - If `None` - search among all vectors - search_params: Additional search params - limit: How many groups return - group_size: How many results return for each group - with_payload: - - Specify which stored payload should be attached to the result. - - If `True` - attach all payload - - If `False` - do not attach any payload - - If List of string - include only specified fields - - If `PayloadSelector` - use explicit rules - with_vectors: - - If `True` - Attach stored vector to the search result. - - If `False` - Do not attach vector. 
- - If List of string - include only specified fields - - Default: `False` - score_threshold: - Define a minimal score threshold for the result. - If defined, less similar results will not be returned. - Score of the returned result might be higher or smaller than the threshold depending - on the Distance function used. - E.g. for cosine similarity only higher scores will be returned. - using: - Name of the vectors to use for recommendations. - If `None` - use default vectors. - lookup_from: - Defines a location (collection and vector field name), used to lookup vectors for recommendations. - If `None` - current collection will be used. - with_lookup: - Look for points in another collection using the group ids. - If specified, each group will contain a record from the specified collection - with the same id as the group id. In addition, the parameter allows to specify - which parts of the record should be returned, like in `with_payload` and `with_vectors` parameters. - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - shard_key_selector: - This parameter allows to specify which shards should be queried. - If `None` - query all shards. Only works for collections with `custom` sharding method. - strategy: - Strategy to use for recommendation. - Strategy defines how to combine multiple examples into a recommendation query. - Possible values: - - - 'average_vector' - calculates average vector of all examples and uses it for search - - 'best_score' - finds the result which is closer to positive examples and further from negative - timeout: - Overrides global timeout for this search. Unit is seconds. - - Returns: - List of groups with not more than `group_size` hits in each group. - Each group also contains an id of the group, which is the value of the payload field. - - """ - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - warnings.warn( - "`recommend_groups` method is deprecated and will be removed in the future." 
- " Use `query_points_groups` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self._client.recommend_groups( - collection_name=collection_name, - group_by=group_by, - positive=positive, - negative=negative, - query_filter=query_filter, - search_params=search_params, - limit=limit, - group_size=group_size, - score_threshold=score_threshold, - with_payload=with_payload, - with_vectors=with_vectors, - using=using, - lookup_from=lookup_from, - with_lookup=with_lookup, - strategy=strategy, - consistency=consistency, - shard_key_selector=shard_key_selector, - timeout=timeout, - **kwargs, - ) - - def discover( - self, - collection_name: str, - target: Optional[types.TargetVector] = None, - context: Optional[Sequence[types.ContextExamplePair]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - """ - Use context and a target to find the most similar points, constrained by the context. - - Args: - collection_name: Collection to discover in - - target: - Look for vectors closest to this. - - When using the target (with or without context), the integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. - - context: - Pairs of { positive, negative } examples to constrain the search. - - When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of finding a point closer to a negative than to a positive part of a pair. - - Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. - - For discovery search (when including a target), the context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise. - - query_filter: - Look only for points which satisfies this conditions - - search_params: - Additional search params - - limit: - Max number of result to return - - offset: - Offset of the first result to return. May be used to paginate results. Note: large offset values may cause performance issues. - - with_payload: - Select which payload to return with the response. Default: None - - with_vectors: - Whether to return the point vector with the result? - - using: - Define which vector to use for recommendation, if not specified - try to use default vector. - - lookup_from: - The location used to lookup vectors. If not specified - use current collection. Note: the other collection should have the same vector size as the current collection. - - consistency: - Read consistency of the search. Defines how many replicas should be queried before returning the result. 
Values: - - - int - number of replicas to query, values should present in all queried replicas - - 'majority' - query all replicas, but return values present in the majority of replicas - - 'quorum' - query the majority of replicas, return values present in all of them - - 'all' - query all replicas, and return values present in all replicas - - shard_key_selector: - This parameter allows to specify which shards should be queried. - If `None` - query all shards. Only works for collections with `custom` sharding method. - - timeout: - Overrides global timeout for this search. Unit is seconds. - - Returns: - List of discovered points with discovery or context scores, accordingly. - """ - warnings.warn( - "`discover` method is deprecated and will be removed in the future." - " Use `query_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self._client.discover( - collection_name=collection_name, - target=target, - context=context, - query_filter=query_filter, - search_params=search_params, - limit=limit, - offset=offset, - with_payload=with_payload, - with_vectors=with_vectors, - using=using, - lookup_from=lookup_from, - consistency=consistency, - shard_key_selector=shard_key_selector, - timeout=timeout, - **kwargs, - ) - - def discover_batch( - self, - collection_name: str, - requests: Sequence[types.DiscoverRequest], - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - warnings.warn( - "`discover_batch` method is deprecated and will be removed in the future." - " Use `query_batch_points` instead.", - DeprecationWarning, - stacklevel=2, - ) - return self._client.discover_batch( - collection_name=collection_name, - requests=requests, - consistency=consistency, - timeout=timeout, - **kwargs, - ) - def scroll( self, collection_name: str, @@ -1579,6 +878,7 @@ def upsert( wait: bool = True, ordering: Optional[types.WriteOrdering] = None, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> types.UpdateResult: """ @@ -1604,6 +904,8 @@ def upsert( If multiple shard_keys are provided, the update will be written to each of them. Only works for collections with `custom` sharding method. + update_filter: If specified, only points that match this filter will be updated, others will be inserted + Returns: Operation Result(UpdateResult) """ @@ -1646,6 +948,7 @@ def upsert( wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, + update_filter=update_filter, **kwargs, ) @@ -1656,6 +959,7 @@ def update_vectors( wait: bool = True, ordering: Optional[types.WriteOrdering] = None, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> types.UpdateResult: """Update specified vectors in the collection. Keeps payload and unspecified vectors unchanged. @@ -1684,6 +988,9 @@ def update_vectors( If multiple shard_keys are provided, the update will be written to each of them. Only works for collections with `custom` sharding method. 
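Closing out the read-side removals: `discover` and `discover_batch` (deleted a few hunks above) become `query_points`/`query_batch_points` with a `DiscoverQuery`. A sketch with placeholder point IDs as target and context:

```python
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")  # assumed local instance

result = client.query_points(
    collection_name="articles",
    query=models.DiscoverQuery(
        discover=models.DiscoverInput(
            target=100,  # point ID (or raw vector) to steer towards
            context=[
                models.ContextPair(positive=1, negative=2),
                models.ContextPair(positive=3, negative=4),
            ],
        )
    ),
    limit=10,
)
```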
+ update_filter: + If specified, only points that match this filter will be updated + Returns: Operation Result(UpdateResult) """ @@ -1702,6 +1009,7 @@ def update_vectors( wait=wait, ordering=ordering, shard_key_selector=shard_key_selector, + update_filter=update_filter, ) def delete_vectors( @@ -2256,6 +1564,7 @@ def update_collection( timeout: Optional[int] = None, sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None, strict_mode_config: Optional[types.StrictModeConfig] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: """Update parameters of the collection @@ -2272,6 +1581,7 @@ def update_collection( If timeout is reached - request will return with service error. sparse_vectors_config: Override for sparse vector-specific configuration strict_mode_config: Override for strict mode configuration + metadata: Arbitrary JSON-like metadata for the collection, will be merged with already stored metadata Returns: Operation result """ @@ -2295,6 +1605,7 @@ def update_collection( timeout=timeout, sparse_vectors_config=sparse_vectors_config, strict_mode_config=strict_mode_config, + metadata=metadata, **kwargs, ) @@ -2334,9 +1645,9 @@ def create_collection( optimizers_config: Optional[types.OptimizersConfigDiff] = None, wal_config: Optional[types.WalConfigDiff] = None, quantization_config: Optional[types.QuantizationConfig] = None, - init_from: Optional[types.InitFrom] = None, timeout: Optional[int] = None, strict_mode_config: Optional[types.StrictModeConfig] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: """Create empty collection with given parameters @@ -2378,11 +1689,11 @@ def create_collection( optimizers_config: Params for optimizer wal_config: Params for Write-Ahead-Log quantization_config: Params for quantization, if None - quantization will be disabled - init_from: Use data stored in another collection to initialize this collection timeout: Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error. strict_mode_config: Configure limitations for the collection, such as max size, rate limits, etc. + metadata: Arbitrary JSON-like metadata for the collection Returns: Operation result @@ -2401,10 +1712,10 @@ def create_collection( optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, - init_from=init_from, timeout=timeout, sparse_vectors_config=sparse_vectors_config, strict_mode_config=strict_mode_config, + metadata=metadata, **kwargs, ) @@ -2424,9 +1735,9 @@ def recreate_collection( optimizers_config: Optional[types.OptimizersConfigDiff] = None, wal_config: Optional[types.WalConfigDiff] = None, quantization_config: Optional[types.QuantizationConfig] = None, - init_from: Optional[types.InitFrom] = None, timeout: Optional[int] = None, strict_mode_config: Optional[types.StrictModeConfig] = None, + metadata: Optional[types.Payload] = None, **kwargs: Any, ) -> bool: """Delete and create empty collection with given parameters @@ -2468,11 +1779,11 @@ def recreate_collection( optimizers_config: Params for optimizer wal_config: Params for Write-Ahead-Log quantization_config: Params for quantization, if None - quantization will be disabled - init_from: Use data stored in another collection to initialize this collection timeout: Wait for operation commit timeout in seconds. If timeout is reached - request will return with service error. 
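On the write side, the `update_filter` parameter added in the hunks above makes upserts conditional: per the new docstring, points that match the filter are updated, others are inserted. Illustrative use (collection, payload key, and vector are placeholders):

```python
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")  # assumed local instance

client.upsert(
    collection_name="articles",
    points=[
        models.PointStruct(id=1, vector=[0.1, 0.2, 0.3, 0.4], payload={"draft": True}),
    ],
    # Points matching this filter are updated; others are inserted
    # (per the docstring added in this patch).
    update_filter=models.Filter(
        must=[models.FieldCondition(key="draft", match=models.MatchValue(value=True))]
    ),
)
```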
strict_mode_config: Configure limitations for the collection, such as max size, rate limits, etc. + metadata: Arbitrary JSON metadata for the collection Returns: Operation result @@ -2498,67 +1809,13 @@ def recreate_collection( optimizers_config=optimizers_config, wal_config=wal_config, quantization_config=quantization_config, - init_from=init_from, timeout=timeout, sparse_vectors_config=sparse_vectors_config, strict_mode_config=strict_mode_config, + metadata=metadata, **kwargs, ) - def upload_records( - self, - collection_name: str, - records: Iterable[types.Record], - batch_size: int = 64, - parallel: int = 1, - method: Optional[str] = None, - max_retries: int = 3, - wait: bool = False, - shard_key_selector: Optional[types.ShardKeySelector] = None, - **kwargs: Any, - ) -> None: - """Upload records to the collection - - Similar to `upload_collection` method, but operates with records, rather than vector and payload individually. - - Args: - collection_name: Name of the collection to upload to - records: Iterator over records to upload - batch_size: How many vectors upload per-request, Default: 64 - parallel: Number of parallel processes of upload - method: Start method for parallel processes, Default: forkserver - max_retries: maximum number of retries in case of a failure - during the upload of a batch - wait: - Await for the results to be applied on the server side. - If `true`, each update request will explicitly wait for the confirmation of completion. Might be slower. - If `false`, each update request will return immediately after the confirmation of receiving. - Default: `false` - shard_key_selector: Defines the shard groups that should be used to write updates into. - If multiple shard_keys are provided, the update will be written to each of them. - Only works for collections with `custom` sharding method. - This parameter overwrites shard keys written in the records. - - """ - warnings.warn( - "`upload_records` is deprecated, use `upload_points` instead", - DeprecationWarning, - stacklevel=2, - ) - - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - - return self._client.upload_records( - collection_name=collection_name, - records=records, - batch_size=batch_size, - parallel=parallel, - method=method, - max_retries=max_retries, - wait=wait, - shard_key_selector=shard_key_selector, - ) - def upload_points( self, collection_name: str, @@ -2569,6 +1826,7 @@ def upload_points( max_retries: int = 3, wait: bool = False, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> None: """Upload points to the collection @@ -2592,7 +1850,7 @@ def upload_points( If multiple shard_keys are provided, the update will be written to each of them. Only works for collections with `custom` sharding method. This parameter overwrites shard keys written in the records. - + update_filter: If specified, only points that match this filter will be updated, others will be inserted """ def chain(*iterables: Iterable) -> Iterable: @@ -2625,6 +1883,7 @@ def chain(*iterables: Iterable) -> Iterable: max_retries=max_retries, wait=wait, shard_key_selector=shard_key_selector, + update_filter=update_filter, ) def upload_collection( @@ -2643,12 +1902,13 @@ def upload_collection( max_retries: int = 3, wait: bool = False, shard_key_selector: Optional[types.ShardKeySelector] = None, + update_filter: Optional[types.Filter] = None, **kwargs: Any, ) -> None: """Upload vectors and payload to the collection. 
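With `init_from` dropped and `upload_records` removed above, bootstrapping a collection under the patched API looks like this; `metadata` is the new free-form field (merged with stored metadata on `update_collection`). Names and sizes are illustrative:

```python
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")  # assumed local instance

client.create_collection(
    collection_name="articles",
    vectors_config=models.VectorParams(size=4, distance=models.Distance.COSINE),
    metadata={"owner": "search-team", "schema_version": 1},  # arbitrary JSON-like data
)

# upload_records(records=...)  ->  upload_points(points=...)
client.upload_points(
    collection_name="articles",
    points=(
        models.PointStruct(id=i, vector=[0.1 * i] * 4, payload={"i": i})
        for i in range(100)
    ),
    batch_size=64,
)
```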
This method will perform automatic batching of the data. If you need to perform a single update, use `upsert` method. - Note: use `upload_records` method if you want to upload multiple vectors with single payload. + Note: use `upload_points` method if you want to upload multiple vectors with single payload. Args: collection_name: Name of the collection to upload to @@ -2668,6 +1928,7 @@ def upload_collection( shard_key_selector: Defines the shard groups that should be used to write updates into. If multiple shard_keys are provided, the update will be written to each of them. Only works for collections with `custom` sharding method. + update_filter: If specified, only points that match this filter will be updated, others will be inserted """ def chain(*iterables: Iterable) -> Iterable: @@ -2703,6 +1964,7 @@ def chain(*iterables: Iterable) -> Iterable: max_retries=max_retries, wait=wait, shard_key_selector=shard_key_selector, + update_filter=update_filter, ) def create_payload_index( @@ -3066,24 +2328,6 @@ def recover_shard_snapshot( **kwargs, ) - def lock_storage(self, reason: str, **kwargs: Any) -> types.LocksOption: - """Lock storage for writing.""" - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - - return self._client.lock_storage(reason=reason, **kwargs) - - def unlock_storage(self, **kwargs: Any) -> types.LocksOption: - """Unlock storage for writing.""" - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - - return self._client.unlock_storage(**kwargs) - - def get_locks(self, **kwargs: Any) -> types.LocksOption: - """Get current locks state.""" - assert len(kwargs) == 0, f"Unknown arguments: {list(kwargs.keys())}" - - return self._client.get_locks(**kwargs) - def migrate( self, dest_client: QdrantBase, diff --git a/qdrant_client/qdrant_fastembed.py b/qdrant_client/qdrant_fastembed.py index e7cb9f0c..0bf6a61c 100644 --- a/qdrant_client/qdrant_fastembed.py +++ b/qdrant_client/qdrant_fastembed.py @@ -7,7 +7,7 @@ from pydantic import BaseModel from qdrant_client import grpc -from qdrant_client.common.client_warnings import show_warning +from qdrant_client.common.client_warnings import show_warning, show_warning_once from qdrant_client.client_base import QdrantBase from qdrant_client.embed.embedder import Embedder from qdrant_client.embed.model_embedder import ModelEmbedder @@ -562,6 +562,11 @@ def add( List of IDs of added documents. If no ids provided, UUIDs will be randomly generated on client side. """ + show_warning_once( + "`add` method has been deprecated and will be removed in 1.17. " + "Instead, inference can be done internally within regular methods like `upsert` by wrapping " + "data into `models.Document` or `models.Image`." + ) # check if we have fastembed installed encoded_docs = self._embed_documents( @@ -637,13 +642,17 @@ def query( - Exclude vectors which doesn't fit given conditions. - If `None` - search among all vectors limit: How many results return - **kwargs: Additional search parameters. See `qdrant_client.models.SearchRequest` for details. + **kwargs: Additional search parameters. See `qdrant_client.models.QueryRequest` for details. Returns: list[types.ScoredPoint]: List of scored points. """ - + show_warning_once( + "`query` method has been deprecated and will be removed in 1.17. " + "Instead, inference can be done internally within regular methods like `query_points` by wrapping " + "data into `models.Document` or `models.Image`." 
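The replacement path these fastembed deprecations point at: wrap raw data in `models.Document` (or `models.Image`) and let `upsert`/`query_points` run inference internally. A sketch, assuming fastembed is installed, the model name is supported, and the collection was created with a matching dense vector configuration:

```python
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")  # assumed local instance
model_name = "sentence-transformers/all-MiniLM-L6-v2"  # assumed supported model

client.upsert(
    collection_name="docs",
    points=[
        models.PointStruct(
            id=1,
            vector=models.Document(text="Qdrant is a vector database", model=model_name),
            payload={"source": "readme"},
        )
    ],
)

hits = client.query_points(
    collection_name="docs",
    query=models.Document(text="what is qdrant?", model=model_name),
    limit=5,
).points
```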
+ ) embedding_model_inst = self._get_or_init_model( model_name=self.embedding_model_name, deprecated=True ) @@ -652,16 +661,15 @@ def query( if self.sparse_embedding_model_name is None: return self._scored_points_to_query_responses( - self.search( + self.query_points( collection_name=collection_name, - query_vector=models.NamedVector( - name=self.get_vector_field_name(), vector=query_vector - ), + query=query_vector, + using=self.get_vector_field_name(), query_filter=query_filter, limit=limit, with_payload=True, **kwargs, - ) + ).points ) sparse_embedding_model_inst = self._get_or_init_sparse_model( @@ -673,32 +681,30 @@ def query( values=sparse_vector.values.tolist(), ) - dense_request = models.SearchRequest( - vector=models.NamedVector( - name=self.get_vector_field_name(), - vector=query_vector, - ), + dense_request = models.QueryRequest( + query=query_vector, + using=self.get_vector_field_name(), filter=query_filter, limit=limit, with_payload=True, **kwargs, ) - sparse_request = models.SearchRequest( - vector=models.NamedSparseVector( - name=self.get_sparse_vector_field_name(), - vector=sparse_query_vector, - ), + sparse_request = models.QueryRequest( + query=sparse_query_vector, + using=self.get_sparse_vector_field_name(), filter=query_filter, limit=limit, with_payload=True, **kwargs, ) - dense_request_response, sparse_request_response = self.search_batch( + dense_request_response, sparse_request_response = self.query_batch_points( collection_name=collection_name, requests=[dense_request, sparse_request] ) return self._scored_points_to_query_responses( - reciprocal_rank_fusion([dense_request_response, sparse_request_response], limit=limit) + reciprocal_rank_fusion( + [dense_request_response.points, sparse_request_response.points], limit=limit + ) ) def query_batch( @@ -723,22 +729,26 @@ def query_batch( - If `None` - search among all vectors This filter will be applied to all search requests. limit: How many results return - **kwargs: Additional search parameters. See `qdrant_client.models.SearchRequest` for details. + **kwargs: Additional search parameters. See `qdrant_client.models.QueryRequest` for details. Returns: list[list[QueryResponse]]: List of lists of responses for each query text. """ + show_warning_once( + "`query_batch` method has been deprecated and will be removed in 1.17. " + "Instead, inference can be done internally within regular methods like `query_batch_points` by wrapping " + "data into `models.Document` or `models.Image`." 
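The rewritten `query_batch` body above still fuses dense and sparse results client-side via `reciprocal_rank_fusion`; the Query API can also push that hybrid server-side with prefetch plus a `FusionQuery`. That is a different technique than this patch uses, noted here only as an alternative. Sketch, assuming named vectors `dense` and `sparse` exist on the collection:

```python
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")  # assumed local instance

hybrid = client.query_points(
    collection_name="docs",
    prefetch=[
        models.Prefetch(query=[0.2, 0.1, 0.9, 0.7], using="dense", limit=20),
        models.Prefetch(
            query=models.SparseVector(indices=[1, 7], values=[0.5, 0.9]),
            using="sparse",
            limit=20,
        ),
    ],
    query=models.FusionQuery(fusion=models.Fusion.RRF),  # rank fusion on the server
    limit=10,
)
```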
+ ) embedding_model_inst = self._get_or_init_model( model_name=self.embedding_model_name, deprecated=True ) query_vectors = list(embedding_model_inst.query_embed(query=query_texts)) requests = [] for vector in query_vectors: - request = models.SearchRequest( - vector=models.NamedVector( - name=self.get_vector_field_name(), vector=vector.tolist() - ), + request = models.QueryRequest( + query=vector.tolist(), + using=self.get_vector_field_name(), filter=query_filter, limit=limit, with_payload=True, @@ -748,11 +758,13 @@ def query_batch( requests.append(request) if self.sparse_embedding_model_name is None: - responses = self.search_batch( + responses = self.query_batch_points( collection_name=collection_name, requests=requests, ) - return [self._scored_points_to_query_responses(response) for response in responses] + return [ + self._scored_points_to_query_responses(response.points) for response in responses + ] sparse_embedding_model_inst = self._get_or_init_sparse_model( model_name=self.sparse_embedding_model_name, deprecated=True @@ -765,11 +777,9 @@ def query_batch( for sparse_vector in sparse_embedding_model_inst.embed(documents=query_texts) ] for sparse_vector in sparse_query_vectors: - request = models.SearchRequest( - vector=models.NamedSparseVector( - name=self.get_sparse_vector_field_name(), - vector=sparse_vector, - ), + request = models.QueryRequest( + using=self.get_sparse_vector_field_name(), + query=sparse_vector, filter=query_filter, limit=limit, with_payload=True, @@ -778,7 +788,7 @@ def query_batch( requests.append(request) - responses = self.search_batch( + responses = self.query_batch_points( collection_name=collection_name, requests=requests, ) @@ -786,7 +796,7 @@ def query_batch( dense_responses = responses[: len(query_texts)] sparse_responses = responses[len(query_texts) :] responses = [ - reciprocal_rank_fusion([dense_response, sparse_response], limit=limit) + reciprocal_rank_fusion([dense_response.points, sparse_response.points], limit=limit) for dense_response, sparse_response in zip(dense_responses, sparse_responses) ] @@ -819,7 +829,7 @@ def _resolve_query( Raises: ValueError: if query is not of supported type """ - if isinstance(query, get_args(types.Query)) or isinstance(query, grpc.Query): + if isinstance(query, get_args(types.Query)): return query if isinstance(query, types.SparseVector): diff --git a/qdrant_client/qdrant_remote.py b/qdrant_client/qdrant_remote.py index 75f83d8a..0ac295e8 100644 --- a/qdrant_client/qdrant_remote.py +++ b/qdrant_client/qdrant_remote.py @@ -17,7 +17,6 @@ ) import httpx -import numpy as np from grpc import Compression from urllib3.util import Url, parse_url from urllib.parse import urljoin @@ -426,181 +425,6 @@ def http(self) -> SyncApis[ApiClient]: """ return self.openapi_client - def search_batch( - self, - collection_name: str, - requests: Sequence[types.SearchRequest], - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - if self._prefer_grpc: - requests = [ - ( - RestToGrpc.convert_search_request(r, collection_name) - if isinstance(r, models.SearchRequest) - else r - ) - for r in requests - ] - - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - - grpc_res: grpc.SearchBatchResponse = self.grpc_points.SearchBatch( - grpc.SearchBatchPoints( - collection_name=collection_name, - search_points=requests, - read_consistency=consistency, - timeout=timeout, 
- ), - timeout=timeout if timeout is not None else self._timeout, - ) - - return [ - [GrpcToRest.convert_scored_point(hit) for hit in r.result] for r in grpc_res.result - ] - else: - requests = [ - (GrpcToRest.convert_search_points(r) if isinstance(r, grpc.SearchPoints) else r) - for r in requests - ] - http_res: Optional[list[list[models.ScoredPoint]]] = ( - self.http.search_api.search_batch_points( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - search_request_batch=models.SearchRequestBatch(searches=requests), - ).result - ) - assert http_res is not None, "Search batch returned None" - return http_res - - def search( - self, - collection_name: str, - query_vector: Union[ - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - types.NumpyArray, - ], - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: Optional[int] = None, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - append_payload: bool = True, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - if not append_payload: - show_warning_once( - message="Usage of `append_payload` is deprecated. Please consider using `with_payload` instead", - category=DeprecationWarning, - stacklevel=5, - idx="search-append-payload", - ) - with_payload = append_payload - - if isinstance(query_vector, np.ndarray): - query_vector = query_vector.tolist() - - if self._prefer_grpc: - vector_name = None - sparse_indices = None - - if isinstance(query_vector, types.NamedVector): - vector = query_vector.vector - vector_name = query_vector.name - elif isinstance(query_vector, types.NamedSparseVector): - vector_name = query_vector.name - sparse_indices = grpc.SparseIndices(data=query_vector.vector.indices) - vector = query_vector.vector.values - elif isinstance(query_vector, tuple): - vector_name = query_vector[0] - vector = query_vector[1] - else: - vector = list(query_vector) - - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - - if isinstance(search_params, models.SearchParams): - search_params = RestToGrpc.convert_search_params(search_params) - - if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)): - with_payload = RestToGrpc.convert_with_payload_interface(with_payload) - - if isinstance(with_vectors, get_args_subscribed(models.WithVector)): - with_vectors = RestToGrpc.convert_with_vectors(with_vectors) - - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - - res: grpc.SearchResponse = self.grpc_points.Search( - grpc.SearchPoints( - collection_name=collection_name, - vector=vector, - vector_name=vector_name, - filter=query_filter, - limit=limit, - offset=offset, - with_vectors=with_vectors, - with_payload=with_payload, - params=search_params, - score_threshold=score_threshold, - read_consistency=consistency, - timeout=timeout, - sparse_indices=sparse_indices, - shard_key_selector=shard_key_selector, - ), - 
timeout=timeout if timeout is not None else self._timeout, - ) - - return [GrpcToRest.convert_scored_point(hit) for hit in res.result] - - else: - if isinstance(query_vector, tuple): - query_vector = types.NamedVector(name=query_vector[0], vector=query_vector[1]) - - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - - if isinstance(search_params, grpc.SearchParams): - search_params = GrpcToRest.convert_search_params(search_params) - - if isinstance(with_payload, grpc.WithPayloadSelector): - with_payload = GrpcToRest.convert_with_payload_selector(with_payload) - - search_result = self.http.search_api.search_points( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - search_request=models.SearchRequest( - vector=query_vector, - filter=query_filter, - limit=limit, - offset=offset, - params=search_params, - with_vector=with_vectors, - with_payload=with_payload, - score_threshold=score_threshold, - shard_key=shard_key_selector, - ), - ) - result: Optional[list[types.ScoredPoint]] = search_result.result - assert result is not None, "Search returned None" - return result - def query_points( self, collection_name: str, @@ -690,20 +514,6 @@ def query_points( return models.QueryResponse(points=scored_points) else: - if isinstance(query, grpc.Query): - query = GrpcToRest.convert_query(query) - - if isinstance(prefetch, grpc.PrefetchQuery): - prefetch = GrpcToRest.convert_prefetch_query(prefetch) - - if isinstance(prefetch, list): - prefetch = [ - GrpcToRest.convert_prefetch_query(p) - if isinstance(p, grpc.PrefetchQuery) - else p - for p in prefetch - ] - if isinstance(query_filter, grpc.Filter): query_filter = GrpcToRest.convert_filter(model=query_filter) @@ -780,10 +590,6 @@ def query_batch_points( for r in grpc_res.result ] else: - requests = [ - (GrpcToRest.convert_query_points(r) if isinstance(r, grpc.QueryPoints) else r) - for r in requests - ] http_res: Optional[list[models.QueryResponse]] = ( self.http.search_api.query_batch_points( collection_name=collection_name, @@ -805,767 +611,40 @@ def query_points_groups( list[list[float]], types.SparseVector, types.Query, - types.NumpyArray, - types.Document, - types.Image, - types.InferenceObject, - None, - ] = None, - using: Optional[str] = None, - prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - group_size: int = 3, - with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - lookup_from: Optional[types.LookupLocation] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.GroupsResult: - if self._prefer_grpc: - if query is not None: - query = RestToGrpc.convert_query(query) - - if isinstance(prefetch, models.Prefetch): - prefetch = [RestToGrpc.convert_prefetch_query(prefetch)] - - if isinstance(prefetch, list): - prefetch = [ - RestToGrpc.convert_prefetch_query(p) if isinstance(p, models.Prefetch) else p - for p in prefetch - ] - - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - - if isinstance(search_params, models.SearchParams): - search_params = 
RestToGrpc.convert_search_params(search_params) - - if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)): - with_payload = RestToGrpc.convert_with_payload_interface(with_payload) - - if isinstance(with_vectors, get_args_subscribed(models.WithVector)): - with_vectors = RestToGrpc.convert_with_vectors(with_vectors) - - if isinstance(with_lookup, models.WithLookup): - with_lookup = RestToGrpc.convert_with_lookup(with_lookup) - - if isinstance(with_lookup, str): - with_lookup = grpc.WithLookup(collection=with_lookup) - - if isinstance(lookup_from, models.LookupLocation): - lookup_from = RestToGrpc.convert_lookup_location(lookup_from) - - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - - result: grpc.QueryGroupsResponse = self.grpc_points.QueryGroups( - grpc.QueryPointGroups( - collection_name=collection_name, - query=query, - prefetch=prefetch, - filter=query_filter, - limit=limit, - with_vectors=with_vectors, - with_payload=with_payload, - params=search_params, - score_threshold=score_threshold, - using=using, - group_by=group_by, - group_size=group_size, - with_lookup=with_lookup, - lookup_from=lookup_from, - timeout=timeout, - shard_key_selector=shard_key_selector, - read_consistency=consistency, - ), - timeout=timeout if timeout is not None else self._timeout, - ).result - return GrpcToRest.convert_groups_result(result) - else: - if isinstance(query, grpc.Query): - query = GrpcToRest.convert_query(query) - - if isinstance(prefetch, grpc.PrefetchQuery): - prefetch = GrpcToRest.convert_prefetch_query(prefetch) - - if isinstance(prefetch, list): - prefetch = [ - GrpcToRest.convert_prefetch_query(p) - if isinstance(p, grpc.PrefetchQuery) - else p - for p in prefetch - ] - - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - - if isinstance(search_params, grpc.SearchParams): - search_params = GrpcToRest.convert_search_params(search_params) - - if isinstance(with_payload, grpc.WithPayloadSelector): - with_payload = GrpcToRest.convert_with_payload_selector(with_payload) - - if isinstance(with_lookup, grpc.WithLookup): - with_lookup = GrpcToRest.convert_with_lookup(with_lookup) - - if isinstance(lookup_from, grpc.LookupLocation): - lookup_from = GrpcToRest.convert_lookup_location(lookup_from) - - query_request = models.QueryGroupsRequest( - shard_key=shard_key_selector, - prefetch=prefetch, - query=query, - using=using, - filter=query_filter, - params=search_params, - score_threshold=score_threshold, - limit=limit, - group_by=group_by, - group_size=group_size, - with_vector=with_vectors, - with_payload=with_payload, - with_lookup=with_lookup, - lookup_from=lookup_from, - ) - - query_result = self.http.search_api.query_points_groups( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - query_groups_request=query_request, - ) - assert query_result is not None, "Query points groups API returned None" - return query_result.result - - def search_groups( - self, - collection_name: str, - query_vector: Union[ - Sequence[float], - tuple[str, list[float]], - types.NamedVector, - types.NamedSparseVector, - types.NumpyArray, - ], - group_by: str, - query_filter: Optional[models.Filter] = None, - search_params: Optional[models.SearchParams] = 
None, - limit: int = 10, - group_size: int = 1, - with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True, - with_vectors: Union[bool, Sequence[str]] = False, - score_threshold: Optional[float] = None, - with_lookup: Optional[types.WithLookupInterface] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.GroupsResult: - if self._prefer_grpc: - vector_name = None - sparse_indices = None - - if isinstance(with_lookup, models.WithLookup): - with_lookup = RestToGrpc.convert_with_lookup(with_lookup) - - if isinstance(with_lookup, str): - with_lookup = grpc.WithLookup(collection=with_lookup) - - if isinstance(query_vector, types.NamedVector): - vector = query_vector.vector - vector_name = query_vector.name - elif isinstance(query_vector, types.NamedSparseVector): - vector_name = query_vector.name - sparse_indices = grpc.SparseIndices(data=query_vector.vector.indices) - vector = query_vector.vector.values - elif isinstance(query_vector, tuple): - vector_name = query_vector[0] - vector = query_vector[1] - else: - vector = list(query_vector) - - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - - if isinstance(search_params, models.SearchParams): - search_params = RestToGrpc.convert_search_params(search_params) - - if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)): - with_payload = RestToGrpc.convert_with_payload_interface(with_payload) - - if isinstance(with_vectors, get_args_subscribed(models.WithVector)): - with_vectors = RestToGrpc.convert_with_vectors(with_vectors) - - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - - result: grpc.GroupsResult = self.grpc_points.SearchGroups( - grpc.SearchPointGroups( - collection_name=collection_name, - vector=vector, - vector_name=vector_name, - filter=query_filter, - limit=limit, - group_size=group_size, - with_vectors=with_vectors, - with_payload=with_payload, - params=search_params, - score_threshold=score_threshold, - group_by=group_by, - read_consistency=consistency, - with_lookup=with_lookup, - timeout=timeout, - sparse_indices=sparse_indices, - shard_key_selector=shard_key_selector, - ), - timeout=timeout if timeout is not None else self._timeout, - ).result - - return GrpcToRest.convert_groups_result(result) - else: - if isinstance(with_lookup, grpc.WithLookup): - with_lookup = GrpcToRest.convert_with_lookup(with_lookup) - - if isinstance(query_vector, tuple): - query_vector = construct( - models.NamedVector, - name=query_vector[0], - vector=query_vector[1], - ) - - if isinstance(query_vector, np.ndarray): - query_vector = query_vector.tolist() - - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - - if isinstance(search_params, grpc.SearchParams): - search_params = GrpcToRest.convert_search_params(search_params) - - if isinstance(with_payload, grpc.WithPayloadSelector): - with_payload = GrpcToRest.convert_with_payload_selector(with_payload) - - search_groups_request = construct( - models.SearchGroupsRequest, - vector=query_vector, - filter=query_filter, - params=search_params, - with_payload=with_payload, - 
with_vector=with_vectors, - score_threshold=score_threshold, - group_by=group_by, - group_size=group_size, - limit=limit, - with_lookup=with_lookup, - shard_key=shard_key_selector, - ) - - return self.openapi_client.search_api.search_point_groups( - search_groups_request=search_groups_request, - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - ).result - - def search_matrix_pairs( - self, - collection_name: str, - query_filter: Optional[types.Filter] = None, - limit: int = 3, - sample: int = 10, - using: Optional[str] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.SearchMatrixPairsResponse: - if self._prefer_grpc: - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - - response = self.grpc_points.SearchMatrixPairs( - grpc.SearchMatrixPoints( - collection_name=collection_name, - filter=query_filter, - sample=sample, - limit=limit, - using=using, - timeout=timeout, - read_consistency=consistency, - shard_key_selector=shard_key_selector, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - return GrpcToRest.convert_search_matrix_pairs(response.result) - - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - - search_matrix_result = self.openapi_client.search_api.search_matrix_pairs( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - search_matrix_request=models.SearchMatrixRequest( - shard_key=shard_key_selector, - limit=limit, - sample=sample, - using=using, - filter=query_filter, - ), - ).result - assert search_matrix_result is not None, "Search matrix pairs returned None result" - - return search_matrix_result - - def search_matrix_offsets( - self, - collection_name: str, - query_filter: Optional[types.Filter] = None, - limit: int = 3, - sample: int = 10, - using: Optional[str] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> types.SearchMatrixOffsetsResponse: - if self._prefer_grpc: - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - - if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)): - shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector) - - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - - response = self.grpc_points.SearchMatrixOffsets( - grpc.SearchMatrixPoints( - collection_name=collection_name, - filter=query_filter, - sample=sample, - limit=limit, - using=using, - timeout=timeout, - read_consistency=consistency, - shard_key_selector=shard_key_selector, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - return GrpcToRest.convert_search_matrix_offsets(response.result) - - if isinstance(query_filter, grpc.Filter): - query_filter = GrpcToRest.convert_filter(model=query_filter) - - 
search_matrix_result = self.openapi_client.search_api.search_matrix_offsets( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - search_matrix_request=models.SearchMatrixRequest( - shard_key=shard_key_selector, - limit=limit, - sample=sample, - using=using, - filter=query_filter, - ), - ).result - assert search_matrix_result is not None, "Search matrix offsets returned None result" - - return search_matrix_result - - def recommend_batch( - self, - collection_name: str, - requests: Sequence[types.RecommendRequest], - consistency: Optional[types.ReadConsistency] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[list[types.ScoredPoint]]: - if self._prefer_grpc: - requests = [ - ( - RestToGrpc.convert_recommend_request(r, collection_name) - if isinstance(r, models.RecommendRequest) - else r - ) - for r in requests - ] - - if isinstance(consistency, get_args_subscribed(models.ReadConsistency)): - consistency = RestToGrpc.convert_read_consistency(consistency) - - grpc_res: grpc.SearchBatchResponse = self.grpc_points.RecommendBatch( - grpc.RecommendBatchPoints( - collection_name=collection_name, - recommend_points=requests, - read_consistency=consistency, - timeout=timeout, - ), - timeout=timeout if timeout is not None else self._timeout, - ) - - return [ - [GrpcToRest.convert_scored_point(hit) for hit in r.result] for r in grpc_res.result - ] - else: - requests = [ - ( - GrpcToRest.convert_recommend_points(r) - if isinstance(r, grpc.RecommendPoints) - else r - ) - for r in requests - ] - http_res: list[list[models.ScoredPoint]] = self.http.search_api.recommend_batch_points( - collection_name=collection_name, - consistency=consistency, - timeout=timeout, - recommend_request_batch=models.RecommendRequestBatch(searches=requests), - ).result - return http_res - - def recommend( - self, - collection_name: str, - positive: Optional[Sequence[types.RecommendExample]] = None, - negative: Optional[Sequence[types.RecommendExample]] = None, - query_filter: Optional[types.Filter] = None, - search_params: Optional[types.SearchParams] = None, - limit: int = 10, - offset: int = 0, - with_payload: Union[bool, list[str], types.PayloadSelector] = True, - with_vectors: Union[bool, list[str]] = False, - score_threshold: Optional[float] = None, - using: Optional[str] = None, - lookup_from: Optional[types.LookupLocation] = None, - strategy: Optional[types.RecommendStrategy] = None, - consistency: Optional[types.ReadConsistency] = None, - shard_key_selector: Optional[types.ShardKeySelector] = None, - timeout: Optional[int] = None, - **kwargs: Any, - ) -> list[types.ScoredPoint]: - if positive is None: - positive = [] - - if negative is None: - negative = [] - - if self._prefer_grpc: - positive_ids = RestToGrpc.convert_recommend_examples_to_ids(positive) - positive_vectors = RestToGrpc.convert_recommend_examples_to_vectors(positive) - - negative_ids = RestToGrpc.convert_recommend_examples_to_ids(negative) - negative_vectors = RestToGrpc.convert_recommend_examples_to_vectors(negative) - - if isinstance(query_filter, models.Filter): - query_filter = RestToGrpc.convert_filter(model=query_filter) - - if isinstance(search_params, models.SearchParams): - search_params = RestToGrpc.convert_search_params(search_params) - - if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)): - with_payload = RestToGrpc.convert_with_payload_interface(with_payload) - - if isinstance(with_vectors, get_args_subscribed(models.WithVector)): - with_vectors = 
RestToGrpc.convert_with_vectors(with_vectors)
-
-            if isinstance(lookup_from, models.LookupLocation):
-                lookup_from = RestToGrpc.convert_lookup_location(lookup_from)
-
-            if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
-                consistency = RestToGrpc.convert_read_consistency(consistency)
-
-            if isinstance(strategy, (str, models.RecommendStrategy)):
-                strategy = RestToGrpc.convert_recommend_strategy(strategy)
-
-            if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
-                shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
-
-            res: grpc.SearchResponse = self.grpc_points.Recommend(
-                grpc.RecommendPoints(
-                    collection_name=collection_name,
-                    positive=positive_ids,
-                    negative=negative_ids,
-                    filter=query_filter,
-                    limit=limit,
-                    offset=offset,
-                    with_vectors=with_vectors,
-                    with_payload=with_payload,
-                    params=search_params,
-                    score_threshold=score_threshold,
-                    using=using,
-                    lookup_from=lookup_from,
-                    read_consistency=consistency,
-                    strategy=strategy,
-                    positive_vectors=positive_vectors,
-                    negative_vectors=negative_vectors,
-                    shard_key_selector=shard_key_selector,
-                    timeout=timeout,
-                ),
-                timeout=timeout if timeout is not None else self._timeout,
-            )
-
-            return [GrpcToRest.convert_scored_point(hit) for hit in res.result]
-        else:
-            positive = [
-                (
-                    GrpcToRest.convert_point_id(example)
-                    if isinstance(example, grpc.PointId)
-                    else example
-                )
-                for example in positive
-            ]
-
-            negative = [
-                (
-                    GrpcToRest.convert_point_id(example)
-                    if isinstance(example, grpc.PointId)
-                    else example
-                )
-                for example in negative
-            ]
-
-            if isinstance(query_filter, grpc.Filter):
-                query_filter = GrpcToRest.convert_filter(model=query_filter)
-
-            if isinstance(search_params, grpc.SearchParams):
-                search_params = GrpcToRest.convert_search_params(search_params)
-
-            if isinstance(with_payload, grpc.WithPayloadSelector):
-                with_payload = GrpcToRest.convert_with_payload_selector(with_payload)
-
-            if isinstance(lookup_from, grpc.LookupLocation):
-                lookup_from = GrpcToRest.convert_lookup_location(lookup_from)
-
-            result = self.openapi_client.search_api.recommend_points(
-                collection_name=collection_name,
-                consistency=consistency,
-                timeout=timeout,
-                recommend_request=models.RecommendRequest(
-                    filter=query_filter,
-                    positive=positive,
-                    negative=negative,
-                    params=search_params,
-                    limit=limit,
-                    offset=offset,
-                    with_payload=with_payload,
-                    with_vector=with_vectors,
-                    score_threshold=score_threshold,
-                    lookup_from=lookup_from,
-                    using=using,
-                    strategy=strategy,
-                    shard_key=shard_key_selector,
-                ),
-            ).result
-        assert result is not None, "Recommend points API returned None"
-        return result
-
-    def recommend_groups(
-        self,
-        collection_name: str,
-        group_by: str,
-        positive: Optional[Sequence[Union[types.PointId, list[float]]]] = None,
-        negative: Optional[Sequence[Union[types.PointId, list[float]]]] = None,
-        query_filter: Optional[models.Filter] = None,
-        search_params: Optional[models.SearchParams] = None,
-        limit: int = 10,
-        group_size: int = 1,
-        score_threshold: Optional[float] = None,
-        with_payload: Union[bool, Sequence[str], models.PayloadSelector] = True,
-        with_vectors: Union[bool, Sequence[str]] = False,
-        using: Optional[str] = None,
-        lookup_from: Optional[models.LookupLocation] = None,
-        with_lookup: Optional[types.WithLookupInterface] = None,
-        strategy: Optional[types.RecommendStrategy] = None,
-        consistency: Optional[types.ReadConsistency] = None,
-        shard_key_selector: Optional[types.ShardKeySelector] = None,
-        timeout: Optional[int] = None,
-        **kwargs: Any,
-    ) -> types.GroupsResult:
-        positive = positive if positive is not None else []
-        negative = negative if negative is not None else []
-
-        if self._prefer_grpc:
-            if isinstance(with_lookup, models.WithLookup):
-                with_lookup = RestToGrpc.convert_with_lookup(with_lookup)
-
-            if isinstance(with_lookup, str):
-                with_lookup = grpc.WithLookup(collection=with_lookup)
-
-            positive_ids = RestToGrpc.convert_recommend_examples_to_ids(positive)
-            positive_vectors = RestToGrpc.convert_recommend_examples_to_vectors(positive)
-
-            negative_ids = RestToGrpc.convert_recommend_examples_to_ids(negative)
-            negative_vectors = RestToGrpc.convert_recommend_examples_to_vectors(negative)
-
-            if isinstance(query_filter, models.Filter):
-                query_filter = RestToGrpc.convert_filter(model=query_filter)
-
-            if isinstance(search_params, models.SearchParams):
-                search_params = RestToGrpc.convert_search_params(search_params)
-
-            if isinstance(with_payload, get_args_subscribed(models.WithPayloadInterface)):
-                with_payload = RestToGrpc.convert_with_payload_interface(with_payload)
-
-            if isinstance(with_vectors, get_args_subscribed(models.WithVector)):
-                with_vectors = RestToGrpc.convert_with_vectors(with_vectors)
-
-            if isinstance(lookup_from, models.LookupLocation):
-                lookup_from = RestToGrpc.convert_lookup_location(lookup_from)
-
-            if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
-                consistency = RestToGrpc.convert_read_consistency(consistency)
-
-            if isinstance(strategy, (str, models.RecommendStrategy)):
-                strategy = RestToGrpc.convert_recommend_strategy(strategy)
-
-            if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
-                shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
-
-            res: grpc.GroupsResult = self.grpc_points.RecommendGroups(
-                grpc.RecommendPointGroups(
-                    collection_name=collection_name,
-                    positive=positive_ids,
-                    negative=negative_ids,
-                    filter=query_filter,
-                    group_by=group_by,
-                    limit=limit,
-                    group_size=group_size,
-                    with_vectors=with_vectors,
-                    with_payload=with_payload,
-                    params=search_params,
-                    score_threshold=score_threshold,
-                    using=using,
-                    lookup_from=lookup_from,
-                    read_consistency=consistency,
-                    with_lookup=with_lookup,
-                    strategy=strategy,
-                    positive_vectors=positive_vectors,
-                    negative_vectors=negative_vectors,
-                    shard_key_selector=shard_key_selector,
-                    timeout=timeout,
-                ),
-                timeout=timeout if timeout is not None else self._timeout,
-            ).result
-
-            assert res is not None, "Recommend groups API returned None"
-            return GrpcToRest.convert_groups_result(res)
-        else:
-            if isinstance(with_lookup, grpc.WithLookup):
-                with_lookup = GrpcToRest.convert_with_lookup(with_lookup)
-
-            positive = [
-                (
-                    GrpcToRest.convert_point_id(point_id)
-                    if isinstance(point_id, grpc.PointId)
-                    else point_id
-                )
-                for point_id in positive
-            ]
-
-            negative = [
-                (
-                    GrpcToRest.convert_point_id(point_id)
-                    if isinstance(point_id, grpc.PointId)
-                    else point_id
-                )
-                for point_id in negative
-            ]
-
-            if isinstance(query_filter, grpc.Filter):
-                query_filter = GrpcToRest.convert_filter(model=query_filter)
-
-            if isinstance(search_params, grpc.SearchParams):
-                search_params = GrpcToRest.convert_search_params(search_params)
-
-            if isinstance(with_payload, grpc.WithPayloadSelector):
-                with_payload = GrpcToRest.convert_with_payload_selector(with_payload)
-
-            if isinstance(lookup_from, grpc.LookupLocation):
-                lookup_from = GrpcToRest.convert_lookup_location(lookup_from)
-
-            result = self.openapi_client.search_api.recommend_point_groups(
-                collection_name=collection_name,
-                consistency=consistency,
-                timeout=timeout,
-                recommend_groups_request=construct(
-                    models.RecommendGroupsRequest,
-                    positive=positive,
-                    negative=negative,
-                    filter=query_filter,
-                    group_by=group_by,
-                    limit=limit,
-                    group_size=group_size,
-                    params=search_params,
-                    with_payload=with_payload,
-                    with_vector=with_vectors,
-                    score_threshold=score_threshold,
-                    lookup_from=lookup_from,
-                    using=using,
-                    with_lookup=with_lookup,
-                    strategy=strategy,
-                    shard_key=shard_key_selector,
-                ),
-            ).result
-
-        assert result is not None, "Recommend points API returned None"
-        return result
-
-    def discover(
-        self,
-        collection_name: str,
-        target: Optional[types.TargetVector] = None,
-        context: Optional[Sequence[types.ContextExamplePair]] = None,
+            types.NumpyArray,
+            types.Document,
+            types.Image,
+            types.InferenceObject,
+            None,
+        ] = None,
+        using: Optional[str] = None,
+        prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None,
         query_filter: Optional[types.Filter] = None,
         search_params: Optional[types.SearchParams] = None,
         limit: int = 10,
-        offset: int = 0,
-        with_payload: Union[bool, list[str], types.PayloadSelector] = True,
-        with_vectors: Union[bool, list[str]] = False,
-        using: Optional[str] = None,
+        group_size: int = 3,
+        with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
+        with_vectors: Union[bool, Sequence[str]] = False,
+        score_threshold: Optional[float] = None,
+        with_lookup: Optional[types.WithLookupInterface] = None,
         lookup_from: Optional[types.LookupLocation] = None,
         consistency: Optional[types.ReadConsistency] = None,
         shard_key_selector: Optional[types.ShardKeySelector] = None,
         timeout: Optional[int] = None,
         **kwargs: Any,
-    ) -> list[types.ScoredPoint]:
-        if context is None:
-            context = []
-
+    ) -> types.GroupsResult:
         if self._prefer_grpc:
-            target = (
-                RestToGrpc.convert_target_vector(target)
-                if target is not None
-                and isinstance(target, get_args_subscribed(models.RecommendExample))
-                else target
-            )
+            if query is not None:
+                query = RestToGrpc.convert_query(query)
 
-            context = [
-                (
-                    RestToGrpc.convert_context_example_pair(pair)
-                    if isinstance(pair, models.ContextExamplePair)
-                    else pair
-                )
-                for pair in context
-            ]
+            if isinstance(prefetch, models.Prefetch):
+                prefetch = [RestToGrpc.convert_prefetch_query(prefetch)]
+
+            if isinstance(prefetch, list):
+                prefetch = [
+                    RestToGrpc.convert_prefetch_query(p) if isinstance(p, models.Prefetch) else p
+                    for p in prefetch
+                ]
 
             if isinstance(query_filter, models.Filter):
                 query_filter = RestToGrpc.convert_filter(model=query_filter)
@@ -1579,6 +658,12 @@ def discover(
             if isinstance(with_vectors, get_args_subscribed(models.WithVector)):
                 with_vectors = RestToGrpc.convert_with_vectors(with_vectors)
 
+            if isinstance(with_lookup, models.WithLookup):
+                with_lookup = RestToGrpc.convert_with_lookup(with_lookup)
+
+            if isinstance(with_lookup, str):
+                with_lookup = grpc.WithLookup(collection=with_lookup)
+
             if isinstance(lookup_from, models.LookupLocation):
                 lookup_from = RestToGrpc.convert_lookup_location(lookup_from)
 
@@ -1588,43 +673,30 @@ def discover(
             if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
                 shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
 
-            res: grpc.SearchResponse = self.grpc_points.Discover(
-                grpc.DiscoverPoints(
+            result: grpc.QueryGroupsResponse = self.grpc_points.QueryGroups(
+                grpc.QueryPointGroups(
                     collection_name=collection_name,
-                    target=target,
-                    context=context,
+                    query=query,
+                    prefetch=prefetch,
                     filter=query_filter,
                     limit=limit,
-                    offset=offset,
                     with_vectors=with_vectors,
                     with_payload=with_payload,
                     params=search_params,
+                    score_threshold=score_threshold,
                     using=using,
+                    group_by=group_by,
+                    group_size=group_size,
+                    with_lookup=with_lookup,
                     lookup_from=lookup_from,
-                    read_consistency=consistency,
-                    shard_key_selector=shard_key_selector,
                     timeout=timeout,
+                    shard_key_selector=shard_key_selector,
+                    read_consistency=consistency,
                 ),
                 timeout=timeout if timeout is not None else self._timeout,
-            )
-
-            return [GrpcToRest.convert_scored_point(hit) for hit in res.result]
+            ).result
+            return GrpcToRest.convert_groups_result(result)
         else:
-            target = (
-                GrpcToRest.convert_target_vector(target)
-                if target is not None and isinstance(target, grpc.TargetVector)
-                else target
-            )
-
-            context = [
-                (
-                    GrpcToRest.convert_context_example_pair(pair)
-                    if isinstance(pair, grpc.ContextExamplePair)
-                    else pair
-                )
-                for pair in context
-            ]
-
             if isinstance(query_filter, grpc.Filter):
                 query_filter = GrpcToRest.convert_filter(model=query_filter)
@@ -1637,74 +709,143 @@ def discover(
             if isinstance(lookup_from, grpc.LookupLocation):
                 lookup_from = GrpcToRest.convert_lookup_location(lookup_from)
 
-            result = self.openapi_client.search_api.discover_points(
+            query_request = models.QueryGroupsRequest(
+                shard_key=shard_key_selector,
+                prefetch=prefetch,
+                query=query,
+                using=using,
+                filter=query_filter,
+                params=search_params,
+                score_threshold=score_threshold,
+                limit=limit,
+                group_by=group_by,
+                group_size=group_size,
+                with_vector=with_vectors,
+                with_payload=with_payload,
+                with_lookup=with_lookup,
+                lookup_from=lookup_from,
+            )
+
+            query_result = self.http.search_api.query_points_groups(
                 collection_name=collection_name,
                 consistency=consistency,
                 timeout=timeout,
-                discover_request=models.DiscoverRequest(
-                    target=target,
-                    context=context,
+                query_groups_request=query_request,
+            )
+            assert query_result is not None, "Query points groups API returned None"
+            return query_result.result
+
+    def search_matrix_pairs(
+        self,
+        collection_name: str,
+        query_filter: Optional[types.Filter] = None,
+        limit: int = 3,
+        sample: int = 10,
+        using: Optional[str] = None,
+        consistency: Optional[types.ReadConsistency] = None,
+        shard_key_selector: Optional[types.ShardKeySelector] = None,
+        timeout: Optional[int] = None,
+        **kwargs: Any,
+    ) -> types.SearchMatrixPairsResponse:
+        if self._prefer_grpc:
+            if isinstance(query_filter, models.Filter):
+                query_filter = RestToGrpc.convert_filter(model=query_filter)
+
+            if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
+                shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
+
+            if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
+                consistency = RestToGrpc.convert_read_consistency(consistency)
+
+            response = self.grpc_points.SearchMatrixPairs(
+                grpc.SearchMatrixPoints(
+                    collection_name=collection_name,
                     filter=query_filter,
-                    params=search_params,
+                    sample=sample,
                     limit=limit,
-                    offset=offset,
-                    with_payload=with_payload,
-                    with_vector=with_vectors,
-                    lookup_from=lookup_from,
                     using=using,
-                    shard_key=shard_key_selector,
+                    timeout=timeout,
+                    read_consistency=consistency,
+                    shard_key_selector=shard_key_selector,
                 ),
-            ).result
-            assert result is not None, "Discover points API returned None"
-            return result
+                timeout=timeout if timeout is not None else self._timeout,
+            )
+            return GrpcToRest.convert_search_matrix_pairs(response.result)
+
+        if isinstance(query_filter, grpc.Filter):
+            query_filter = GrpcToRest.convert_filter(model=query_filter)
+
+        search_matrix_result = self.openapi_client.search_api.search_matrix_pairs(
+            collection_name=collection_name,
+            consistency=consistency,
+            timeout=timeout,
+            search_matrix_request=models.SearchMatrixRequest(
+                shard_key=shard_key_selector,
+                limit=limit,
+                sample=sample,
+                using=using,
+                filter=query_filter,
+            ),
+        ).result
+        assert search_matrix_result is not None, "Search matrix pairs returned None result"
+
+        return search_matrix_result
 
-    def discover_batch(
+    def search_matrix_offsets(
         self,
         collection_name: str,
-        requests: Sequence[types.DiscoverRequest],
+        query_filter: Optional[types.Filter] = None,
+        limit: int = 3,
+        sample: int = 10,
+        using: Optional[str] = None,
         consistency: Optional[types.ReadConsistency] = None,
+        shard_key_selector: Optional[types.ShardKeySelector] = None,
         timeout: Optional[int] = None,
         **kwargs: Any,
-    ) -> list[list[types.ScoredPoint]]:
+    ) -> types.SearchMatrixOffsetsResponse:
        if self._prefer_grpc:
-            requests = [
-                (
-                    RestToGrpc.convert_discover_request(r, collection_name)
-                    if isinstance(r, models.DiscoverRequest)
-                    else r
-                )
-                for r in requests
-            ]
+            if isinstance(query_filter, models.Filter):
+                query_filter = RestToGrpc.convert_filter(model=query_filter)
+
+            if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
+                shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
+
+            if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
+                consistency = RestToGrpc.convert_read_consistency(consistency)
 
-            grpc_res: grpc.SearchBatchResponse = self.grpc_points.DiscoverBatch(
-                grpc.DiscoverBatchPoints(
+            response = self.grpc_points.SearchMatrixOffsets(
+                grpc.SearchMatrixPoints(
                     collection_name=collection_name,
-                    discover_points=requests,
-                    read_consistency=consistency,
+                    filter=query_filter,
+                    sample=sample,
+                    limit=limit,
+                    using=using,
                     timeout=timeout,
+                    read_consistency=consistency,
+                    shard_key_selector=shard_key_selector,
                 ),
                 timeout=timeout if timeout is not None else self._timeout,
             )
+            return GrpcToRest.convert_search_matrix_offsets(response.result)
 
-            return [
-                [GrpcToRest.convert_scored_point(hit) for hit in r.result] for r in grpc_res.result
-            ]
-        else:
-            requests = [
-                (
-                    GrpcToRest.convert_discover_points(r)
-                    if isinstance(r, grpc.DiscoverPoints)
-                    else r
-                )
-                for r in requests
-            ]
-            http_res: list[list[models.ScoredPoint]] = self.http.search_api.discover_batch_points(
-                collection_name=collection_name,
-                discover_request_batch=models.DiscoverRequestBatch(searches=requests),
-                consistency=consistency,
-                timeout=timeout,
-            ).result
-            return http_res
+        if isinstance(query_filter, grpc.Filter):
+            query_filter = GrpcToRest.convert_filter(model=query_filter)
+
+        search_matrix_result = self.openapi_client.search_api.search_matrix_offsets(
+            collection_name=collection_name,
+            consistency=consistency,
+            timeout=timeout,
+            search_matrix_request=models.SearchMatrixRequest(
+                shard_key=shard_key_selector,
+                limit=limit,
+                sample=sample,
+                using=using,
+                filter=query_filter,
+            ),
+        ).result
+        assert search_matrix_result is not None, "Search matrix offsets returned None result"
+
+        return search_matrix_result
 
     def scroll(
         self,
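Note: the search_matrix_pairs / search_matrix_offsets methods added above share one request shape. A minimal usage sketch, assuming a running instance and an existing collection — the URL, collection name, payload key, and vector name below are illustrative, not part of this changeset:

    from qdrant_client import QdrantClient, models

    client = QdrantClient(url="http://localhost:6333")  # assumed local instance

    # Sample up to 10 points matching the filter and return the 3 nearest
    # neighbours per sampled point, as explicit (id, id, score) pairs.
    pairs = client.search_matrix_pairs(
        collection_name="demo",  # hypothetical collection
        query_filter=models.Filter(
            must=[models.FieldCondition(key="color", match=models.MatchValue(value="red"))]
        ),
        sample=10,
        limit=3,
        using="image",
    )

The offsets variant takes the same arguments and returns the same information as flat offset arrays rather than explicit pairs.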
@@ -1803,6 +944,7 @@ def count(
         exact: bool = True,
         shard_key_selector: Optional[types.ShardKeySelector] = None,
         timeout: Optional[int] = None,
+        consistency: Optional[types.ReadConsistency] = None,
         **kwargs: Any,
     ) -> types.CountResult:
         if self._prefer_grpc:
@@ -1812,6 +954,9 @@ def count(
             if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
                 shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
 
+            if isinstance(consistency, get_args_subscribed(models.ReadConsistency)):
+                consistency = RestToGrpc.convert_read_consistency(consistency)
+
             response = self.grpc_points.Count(
                 grpc.CountPoints(
                     collection_name=collection_name,
@@ -1819,6 +964,7 @@ def count(
                     exact=exact,
                     shard_key_selector=shard_key_selector,
                     timeout=timeout,
+                    read_consistency=consistency,
                 ),
                 timeout=timeout if timeout is not None else self._timeout,
             ).result
@@ -1834,6 +980,7 @@ def count(
                 exact=exact,
                 shard_key=shard_key_selector,
             ),
+            consistency=consistency,
             timeout=timeout,
         ).result
         assert count_result is not None, "Count points returned None result"
@@ -1905,6 +1052,7 @@ def upsert(
         wait: bool = True,
         ordering: Optional[types.WriteOrdering] = None,
         shard_key_selector: Optional[types.ShardKeySelector] = None,
+        update_filter: Optional[types.Filter] = None,
         **kwargs: Any,
     ) -> types.UpdateResult:
         if self._prefer_grpc:
@@ -1940,6 +1088,9 @@ def upsert(
             if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
                 shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
 
+            if isinstance(update_filter, models.Filter):
+                update_filter = RestToGrpc.convert_filter(model=update_filter)
+
             grpc_result = self.grpc_points.Upsert(
                 grpc.UpsertPoints(
                     collection_name=collection_name,
@@ -1947,6 +1098,7 @@ def upsert(
                     points=points,
                     ordering=ordering,
                     shard_key_selector=shard_key_selector,
+                    update_filter=update_filter,
                 ),
                 timeout=self._timeout,
             ).result
@@ -1954,6 +1106,9 @@ def upsert(
             assert grpc_result is not None, "Upsert returned None result"
             return GrpcToRest.convert_update_result(grpc_result)
         else:
+            if isinstance(update_filter, grpc.Filter):
+                update_filter = GrpcToRest.convert_filter(model=update_filter)
+
             if isinstance(points, list):
                 points = [
                     (
@@ -1964,10 +1119,14 @@ def upsert(
                     for point in points
                 ]
 
-                points = models.PointsList(points=points, shard_key=shard_key_selector)
+                points = models.PointsList(
+                    points=points, shard_key=shard_key_selector, update_filter=update_filter
+                )
 
             if isinstance(points, models.Batch):
-                points = models.PointsBatch(batch=points, shard_key=shard_key_selector)
+                points = models.PointsBatch(
+                    batch=points, shard_key=shard_key_selector, update_filter=update_filter
+                )
 
             http_result = self.openapi_client.points_api.upsert_points(
                 collection_name=collection_name,
@@ -1985,6 +1144,7 @@ def update_vectors(
         wait: bool = True,
         ordering: Optional[types.WriteOrdering] = None,
         shard_key_selector: Optional[types.ShardKeySelector] = None,
+        update_filter: Optional[types.Filter] = None,
         **kwargs: Any,
     ) -> types.UpdateResult:
         if self._prefer_grpc:
@@ -1996,6 +1156,9 @@ def update_vectors(
             if isinstance(shard_key_selector, get_args_subscribed(models.ShardKeySelector)):
                 shard_key_selector = RestToGrpc.convert_shard_key_selector(shard_key_selector)
 
+            if isinstance(update_filter, models.Filter):
+                update_filter = RestToGrpc.convert_filter(model=update_filter)
+
             grpc_result = self.grpc_points.UpdateVectors(
                 grpc.UpdatePointVectors(
                     collection_name=collection_name,
@@ -2003,18 +1166,23 @@ def update_vectors(
                     points=points,
                     ordering=ordering,
                     shard_key_selector=shard_key_selector,
+                    update_filter=update_filter,
                 ),
                 timeout=self._timeout,
             ).result
 
             assert grpc_result is not None, "Upsert returned None result"
             return GrpcToRest.convert_update_result(grpc_result)
         else:
+            if isinstance(update_filter, grpc.Filter):
+                update_filter = GrpcToRest.convert_filter(model=update_filter)
+
             return self.openapi_client.points_api.update_vectors(
                 collection_name=collection_name,
                 wait=wait,
                 update_vectors=models.UpdateVectors(
                     points=points,
                     shard_key=shard_key_selector,
+                    update_filter=update_filter,
                 ),
                 ordering=ordering,
             ).result
@@ -2672,6 +1840,7 @@ def update_collection(
         timeout: Optional[int] = None,
         sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None,
         strict_mode_config: Optional[types.StrictModeConfig] = None,
+        metadata: Optional[types.Payload] = None,
         **kwargs: Any,
     ) -> bool:
         if self._prefer_grpc:
@@ -2700,6 +1869,9 @@ def update_collection(
             if isinstance(strict_mode_config, models.StrictModeConfig):
                 strict_mode_config = RestToGrpc.convert_strict_mode_config(strict_mode_config)
 
+            if isinstance(metadata, dict):
+                metadata = RestToGrpc.convert_payload(metadata)
+
             return self.grpc_collections.Update(
                 grpc.UpdateCollection(
                     collection_name=collection_name,
@@ -2711,6 +1883,7 @@ def update_collection(
                     sparse_vectors_config=sparse_vectors_config,
                     strict_mode_config=strict_mode_config,
                     timeout=timeout,
+                    metadata=metadata,
                 ),
                 timeout=timeout if timeout is not None else self._timeout,
             ).result
@@ -2740,6 +1913,7 @@ def update_collection(
                 quantization_config=quantization_config,
                 sparse_vectors=sparse_vectors_config,
                 strict_mode_config=strict_mode_config,
+                metadata=metadata,
             ),
             timeout=timeout,
         ).result
@@ -2775,21 +1949,13 @@ def create_collection(
         optimizers_config: Optional[types.OptimizersConfigDiff] = None,
         wal_config: Optional[types.WalConfigDiff] = None,
         quantization_config: Optional[types.QuantizationConfig] = None,
-        init_from: Optional[types.InitFrom] = None,
         timeout: Optional[int] = None,
         sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None,
         sharding_method: Optional[types.ShardingMethod] = None,
         strict_mode_config: Optional[types.StrictModeConfig] = None,
+        metadata: Optional[types.Payload] = None,
         **kwargs: Any,
     ) -> bool:
-        if init_from is not None:
-            show_warning_once(
-                message="init_from is deprecated",
-                category=DeprecationWarning,
-                stacklevel=5,
-                idx="create-collection-init-from",
-            )
-
         if self._prefer_grpc:
             if isinstance(vectors_config, (models.VectorParams, dict)):
                 vectors_config = RestToGrpc.convert_vectors_config(vectors_config)
@@ -2809,9 +1975,6 @@ def create_collection(
             ):
                 quantization_config = RestToGrpc.convert_quantization_config(quantization_config)
 
-            if isinstance(init_from, models.InitFrom):
-                init_from = RestToGrpc.convert_init_from(init_from)
-
             if isinstance(sparse_vectors_config, dict):
                 sparse_vectors_config = RestToGrpc.convert_sparse_vector_config(
                     sparse_vectors_config
@@ -2823,6 +1986,9 @@ def create_collection(
             if isinstance(strict_mode_config, models.StrictModeConfig):
                 strict_mode_config = RestToGrpc.convert_strict_mode_config(strict_mode_config)
 
+            if isinstance(metadata, dict):
+                metadata = RestToGrpc.convert_payload(metadata)
+
             create_collection = grpc.CreateCollection(
                 collection_name=collection_name,
                 hnsw_config=hnsw_config,
@@ -2834,11 +2000,11 @@ def create_collection(
                 vectors_config=vectors_config,
                 replication_factor=replication_factor,
                 write_consistency_factor=write_consistency_factor,
-                init_from_collection=init_from,
                 quantization_config=quantization_config,
                 sparse_vectors_config=sparse_vectors_config,
                 sharding_method=sharding_method,
                 strict_mode_config=strict_mode_config,
+                metadata=metadata,
             )
             return self.grpc_collections.Create(create_collection, timeout=self._timeout).result
@@ -2854,9 +2020,6 @@ def create_collection(
         if isinstance(quantization_config, grpc.QuantizationConfig):
             quantization_config = GrpcToRest.convert_quantization_config(quantization_config)
 
-        if isinstance(init_from, str):
-            init_from = GrpcToRest.convert_init_from(init_from)
-
         create_collection_request = models.CreateCollection(
             vectors=vectors_config,
             shard_number=shard_number,
@@ -2867,10 +2030,10 @@ def create_collection(
             optimizers_config=optimizers_config,
             wal_config=wal_config,
             quantization_config=quantization_config,
-            init_from=init_from,
             sparse_vectors=sparse_vectors_config,
             sharding_method=sharding_method,
             strict_mode_config=strict_mode_config,
+            metadata=metadata,
         )
 
         result: Optional[bool] = self.http.collections_api.create_collection(
@@ -2894,11 +2057,11 @@ def recreate_collection(
         optimizers_config: Optional[types.OptimizersConfigDiff] = None,
         wal_config: Optional[types.WalConfigDiff] = None,
         quantization_config: Optional[types.QuantizationConfig] = None,
-        init_from: Optional[types.InitFrom] = None,
         timeout: Optional[int] = None,
         sparse_vectors_config: Optional[Mapping[str, types.SparseVectorParams]] = None,
         sharding_method: Optional[types.ShardingMethod] = None,
         strict_mode_config: Optional[types.StrictModeConfig] = None,
+        metadata: Optional[types.Payload] = None,
         **kwargs: Any,
     ) -> bool:
         self.delete_collection(collection_name, timeout=timeout)
@@ -2914,11 +2077,11 @@ def recreate_collection(
             optimizers_config=optimizers_config,
             wal_config=wal_config,
             quantization_config=quantization_config,
-            init_from=init_from,
             timeout=timeout,
             sparse_vectors_config=sparse_vectors_config,
             sharding_method=sharding_method,
             strict_mode_config=strict_mode_config,
+            metadata=metadata,
         )
 
     @property
@@ -2937,6 +2100,7 @@ def _upload_collection(
         method: Optional[str] = None,
         wait: bool = False,
         shard_key_selector: Optional[types.ShardKeySelector] = None,
+        update_filter: Optional[types.Filter] = None,
     ) -> None:
         if method is not None:
             if method in get_all_start_methods():
@@ -2960,6 +2124,7 @@ def _upload_collection(
                 "shard_key_selector": shard_key_selector,
                 "options": self._grpc_options,
                 "timeout": self._timeout,
+                "update_filter": update_filter,
             }
         else:
             updater_kwargs = {
@@ -2968,6 +2133,7 @@ def _upload_collection(
                 "max_retries": max_retries,
                 "wait": wait,
                 "shard_key_selector": shard_key_selector,
+                "update_filter": update_filter,
                 **self._rest_args,
             }
 
@@ -2980,31 +2146,6 @@ def _upload_collection(
             for _ in pool.unordered_map(batches_iterator, **updater_kwargs):
                 pass
 
-    def upload_records(
-        self,
-        collection_name: str,
-        records: Iterable[types.Record],
-        batch_size: int = 64,
-        parallel: int = 1,
-        method: Optional[str] = None,
-        max_retries: int = 3,
-        wait: bool = False,
-        shard_key_selector: Optional[types.ShardKeySelector] = None,
-        **kwargs: Any,
-    ) -> None:
-        batches_iterator = self._updater_class.iterate_records_batches(
-            records=records, batch_size=batch_size
-        )
-        self._upload_collection(
-            batches_iterator=batches_iterator,
-            collection_name=collection_name,
-            max_retries=max_retries,
-            parallel=parallel,
-            method=method,
-            shard_key_selector=shard_key_selector,
-            wait=wait,
-        )
-
     def upload_points(
         self,
         collection_name: str,
@@ -3015,6 +2156,7 @@ def upload_points(
         max_retries: int = 3,
         wait: bool = False,
         shard_key_selector: Optional[types.ShardKeySelector] = None,
+        update_filter: Optional[types.Filter] = None,
         **kwargs: Any,
     ) -> None:
         batches_iterator = self._updater_class.iterate_records_batches(
@@ -3029,6 +2171,7 @@ def upload_points(
             method=method,
             wait=wait,
             shard_key_selector=shard_key_selector,
+            update_filter=update_filter,
         )
 
     def upload_collection(
@@ -3045,6 +2188,7 @@ def upload_collection(
         max_retries: int = 3,
         wait: bool = False,
         shard_key_selector: Optional[types.ShardKeySelector] = None,
+        update_filter: Optional[types.Filter] = None,
         **kwargs: Any,
     ) -> None:
         batches_iterator = self._updater_class.iterate_batches(
@@ -3062,6 +2206,7 @@ def upload_collection(
             method=method,
             wait=wait,
             shard_key_selector=shard_key_selector,
+            update_filter=update_filter,
         )
 
     def create_payload_index(
@@ -3347,25 +2492,6 @@ def recover_shard_snapshot(
             ),
         ).result
 
-    def lock_storage(self, reason: str, **kwargs: Any) -> types.LocksOption:
-        result: Optional[types.LocksOption] = self.openapi_client.service_api.post_locks(
-            models.LocksOption(error_message=reason, write=True)
-        ).result
-        assert result is not None, "Lock storage returned None"
-        return result
-
-    def unlock_storage(self, **kwargs: Any) -> types.LocksOption:
-        result: Optional[types.LocksOption] = self.openapi_client.service_api.post_locks(
-            models.LocksOption(write=False)
-        ).result
-        assert result is not None, "Post locks returned None"
-        return result
-
-    def get_locks(self, **kwargs: Any) -> types.LocksOption:
-        result: Optional[types.LocksOption] = self.openapi_client.service_api.get_locks().result
-        assert result is not None, "Get locks returned None"
-        return result
-
     def create_shard_key(
         self,
         collection_name: str,
@@ -3373,6 +2499,7 @@ def create_shard_key(
         shards_number: Optional[int] = None,
         replication_factor: Optional[int] = None,
         placement: Optional[list[int]] = None,
+        initial_state: Optional[types.ReplicaState] = None,
         timeout: Optional[int] = None,
         **kwargs: Any,
     ) -> bool:
@@ -3380,6 +2507,9 @@ def create_shard_key(
             if isinstance(shard_key, get_args_subscribed(models.ShardKey)):
                 shard_key = RestToGrpc.convert_shard_key(shard_key)
 
+            if isinstance(initial_state, models.ReplicaState):
+                initial_state = RestToGrpc.convert_replica_state(initial_state)
+
             return self.grpc_collections.CreateShardKey(
                 grpc.CreateShardKeyRequest(
                     collection_name=collection_name,
@@ -3389,6 +2519,7 @@ def create_shard_key(
                         shards_number=shards_number,
                         replication_factor=replication_factor,
                         placement=placement or [],
+                        initial_state=initial_state,
                     ),
                 ),
                 timeout=timeout if timeout is not None else self._timeout,
@@ -3402,6 +2533,7 @@ def create_shard_key(
                 shards_number=shards_number,
                 replication_factor=replication_factor,
                 placement=placement,
+                initial_state=initial_state,
            ),
         ).result
         assert result is not None, "Create shard key returned None"
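Note: the update_filter parameter threaded through upsert, update_vectors, and the upload helpers in this file makes writes conditional. A sketch of the intended call shape (the collection name and payload key are illustrative, and the "only overwrite matching points" semantics is this changeset's apparent intent, not something verified here):

    client.upsert(
        collection_name="demo",  # hypothetical collection
        points=[
            models.PointStruct(id=1, vector=[0.1, 0.2, 0.3], payload={"version": 2}),
        ],
        # Only overwrite points that currently match this filter.
        update_filter=models.Filter(
            must=[models.FieldCondition(key="version", match=models.MatchValue(value=1))]
        ),
    )

As the diff shows, both transports accept either model family: a models.Filter is converted with RestToGrpc on the gRPC path, and a grpc.Filter is converted back with GrpcToRest on the REST path.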
diff --git a/qdrant_client/uploader/grpc_uploader.py b/qdrant_client/uploader/grpc_uploader.py
index fbe72d8d..ef51b37a 100644
--- a/qdrant_client/uploader/grpc_uploader.py
+++ b/qdrant_client/uploader/grpc_uploader.py
@@ -3,33 +3,39 @@
 from typing import Any, Generator, Iterable, Optional, Union
 from uuid import uuid4
 
 from qdrant_client import grpc as grpc
+from qdrant_client import models as rest
 from qdrant_client.common.client_exceptions import ResourceExhaustedResponse
 from qdrant_client.connection import get_channel
 from qdrant_client.conversions.conversion import RestToGrpc, payload_to_grpc
-from qdrant_client.grpc import PointId, PointsStub, PointStruct
-from qdrant_client.http.models import Batch, ShardKeySelector
 from qdrant_client.uploader.uploader import BaseUploader
 from qdrant_client.common.client_warnings import show_warning
+from qdrant_client.conversions import common_types as types
 
 
 def upload_batch_grpc(
-    points_client: PointsStub,
+    points_client: grpc.PointsStub,
     collection_name: str,
-    batch: Union[Batch, tuple],
+    batch: Union[rest.Batch, tuple],  # type: ignore[name-defined]
     max_retries: int,
-    shard_key_selector: Optional[ShardKeySelector],
+    shard_key_selector: Optional[grpc.ShardKeySelector],  # type: ignore[name-defined]
+    update_filter: Optional[grpc.Filter],
     wait: bool = False,
     timeout: Optional[int] = None,
 ) -> bool:
     ids_batch, vectors_batch, payload_batch = batch
-    ids_batch = (PointId(uuid=str(uuid4())) for _ in count()) if ids_batch is None else ids_batch
+    ids_batch = (
+        (grpc.PointId(uuid=str(uuid4())) for _ in count()) if ids_batch is None else ids_batch
+    )
     payload_batch = (None for _ in count()) if payload_batch is None else payload_batch
 
     points = [
-        PointStruct(
-            id=RestToGrpc.convert_extended_point_id(idx) if not isinstance(idx, PointId) else idx,
+        grpc.PointStruct(
+            id=RestToGrpc.convert_extended_point_id(idx)
+            if not isinstance(idx, grpc.PointId)
+            else idx,
             vectors=RestToGrpc.convert_vector_struct(vector),
             payload=payload_to_grpc(payload or {}),
         )
@@ -44,9 +50,8 @@ def upload_batch_grpc(
                     collection_name=collection_name,
                     points=points,
                     wait=wait,
-                    shard_key_selector=RestToGrpc.convert_shard_key_selector(shard_key_selector)
-                    if shard_key_selector is not None
-                    else None,
+                    shard_key_selector=shard_key_selector,
+                    update_filter=update_filter,
                 ),
                 timeout=timeout,
             )
@@ -81,7 +86,8 @@ def __init__(
         collection_name: str,
         max_retries: int,
         wait: bool = False,
-        shard_key_selector: Optional[ShardKeySelector] = None,
+        shard_key_selector: Optional[types.ShardKeySelector] = None,
+        update_filter: Optional[types.Filter] = None,
         **kwargs: Any,
     ):
         self.collection_name = collection_name
@@ -90,8 +96,17 @@ def __init__(
         self.max_retries = max_retries
         self._kwargs = kwargs
         self._wait = wait
-        self._shard_key_selector = shard_key_selector
+        self._shard_key_selector = (
+            RestToGrpc.convert_shard_key_selector(shard_key_selector)
+            if shard_key_selector is not None
+            else None
+        )
         self._timeout = kwargs.pop("timeout", None)
+        self._update_filter = (
+            RestToGrpc.convert_filter(update_filter)
+            if isinstance(update_filter, rest.Filter)  # type: ignore[attr-defined]
+            else update_filter
+        )
 
     @classmethod
     def start(
@@ -115,13 +130,14 @@ def start(
 
     def process_upload(self, items: Iterable[Any]) -> Generator[bool, None, None]:
         channel = get_channel(host=self._host, port=self._port, **self._kwargs)
-        points_client = PointsStub(channel)
+        points_client = grpc.PointsStub(channel)
         for batch in items:
             yield upload_batch_grpc(
                 points_client,
                 self.collection_name,
                 batch,
                 shard_key_selector=self._shard_key_selector,
+                update_filter=self._update_filter,
                 max_retries=self.max_retries,
                 wait=self._wait,
                 timeout=self._timeout,
diff --git a/qdrant_client/uploader/rest_uploader.py b/qdrant_client/uploader/rest_uploader.py
index 1fd15bf0..48268358 100644
--- a/qdrant_client/uploader/rest_uploader.py
+++ b/qdrant_client/uploader/rest_uploader.py
@@ -5,19 +5,23 @@
 
 import numpy as np
 
+from qdrant_client import grpc as grpc
 from qdrant_client.common.client_exceptions import ResourceExhaustedResponse
 from qdrant_client.http import SyncApis
-from qdrant_client.http.models import Batch, PointsList, PointStruct, ShardKeySelector
+from qdrant_client import models as rest
 from qdrant_client.uploader.uploader import BaseUploader
 from qdrant_client.common.client_warnings import show_warning
+from qdrant_client.conversions import common_types as types
+from qdrant_client.conversions.conversion import GrpcToRest
 
 
 def upload_batch(
     openapi_client: SyncApis,
     collection_name: str,
-    batch: Union[tuple, Batch],
+    batch: Union[tuple, rest.Batch],  # type: ignore[name-defined]
     max_retries: int,
-    shard_key_selector: Optional[ShardKeySelector],
+    shard_key_selector: Optional[rest.ShardKeySelector],  # type: ignore[name-defined]
+    update_filter: Optional[rest.Filter],  # type: ignore[name-defined]
     wait: bool = False,
 ) -> bool:
     ids_batch, vectors_batch, payload_batch = batch
@@ -26,7 +30,7 @@ def upload_batch(
     payload_batch = (None for _ in count()) if payload_batch is None else payload_batch
 
     points = [
-        PointStruct(
+        rest.PointStruct(  # type: ignore[attr-defined]
             id=idx,
             vector=(vector.tolist() if isinstance(vector, np.ndarray) else vector) or {},
             payload=payload,
@@ -39,7 +43,9 @@ def upload_batch(
         try:
             openapi_client.points_api.upsert_points(
                 collection_name=collection_name,
-                point_insert_operations=PointsList(points=points, shard_key=shard_key_selector),
+                point_insert_operations=rest.PointsList(  # type: ignore[attr-defined]
+                    points=points, shard_key=shard_key_selector, update_filter=update_filter
+                ),
                 wait=wait,
             )
             break
@@ -72,7 +78,8 @@ def __init__(
         collection_name: str,
         max_retries: int,
         wait: bool = False,
-        shard_key_selector: Optional[ShardKeySelector] = None,
+        shard_key_selector: Optional[types.ShardKeySelector] = None,
+        update_filter: Optional[types.Filter] = None,
         **kwargs: Any,
     ):
         self.collection_name = collection_name
@@ -80,6 +87,11 @@ def __init__(
         self.max_retries = max_retries
         self._wait = wait
         self._shard_key_selector = shard_key_selector
+        self._update_filter = (
+            GrpcToRest.convert_filter(model=update_filter)
+            if isinstance(update_filter, grpc.Filter)
+            else update_filter
+        )
 
     @classmethod
     def start(
@@ -101,5 +113,6 @@ def process(self, items: Iterable[Any]) -> Iterable[bool]:
                 batch,
                 shard_key_selector=self._shard_key_selector,
                 max_retries=self.max_retries,
+                update_filter=self._update_filter,
                 wait=self._wait,
             )
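Note: both uploaders normalize update_filter once in __init__ rather than per batch — the gRPC uploader converts a REST models.Filter to its grpc counterpart, and the REST uploader does the inverse for a grpc.Filter — so callers see the same behaviour regardless of transport. A sketch of the caller-facing shape (names illustrative):

    client.upload_points(
        collection_name="demo",   # hypothetical collection
        points=points,            # any iterable of models.PointStruct
        batch_size=64,
        update_filter=models.Filter(
            must=[models.FieldCondition(key="stale", match=models.MatchValue(value=True))]
        ),
    )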
diff --git a/tests/congruence_tests/test_collections.py b/tests/congruence_tests/test_collections.py
index 48aca619..9be75351 100644
--- a/tests/congruence_tests/test_collections.py
+++ b/tests/congruence_tests/test_collections.py
@@ -1,12 +1,9 @@
 from time import sleep
 from typing import Callable
 
-import pytest
-
 from qdrant_client.http import models
 from qdrant_client.http.exceptions import UnexpectedResponse
 from tests.congruence_tests.test_common import (
-    compare_collections,
     generate_fixtures,
     init_client,
     init_local,
@@ -100,75 +97,6 @@ def test_collection_exists():
     assert local_client.collection_exists(COLLECTION_NAME)
 
 
-def test_init_from():
-    vector_size = 2
-
-    remote_client = init_remote()
-    local_client = init_local()
-
-    points = generate_fixtures(vectors_sizes=vector_size)
-    vector_params = models.VectorParams(size=vector_size, distance=models.Distance.COSINE)
-
-    if remote_client.collection_exists(COLLECTION_NAME):
-        remote_client.delete_collection(collection_name=COLLECTION_NAME)
-    remote_client.create_collection(collection_name=COLLECTION_NAME, vectors_config=vector_params)
-
-    if local_client.collection_exists(COLLECTION_NAME):
-        local_client.delete_collection(collection_name=COLLECTION_NAME)
-    local_client.create_collection(collection_name=COLLECTION_NAME, vectors_config=vector_params)
-
-    remote_client.upload_points(COLLECTION_NAME, points, wait=True)
-    local_client.upload_points(COLLECTION_NAME, points)
-    compare_collections(remote_client, local_client, len(points), collection_name=COLLECTION_NAME)
-
-    new_collection_name = COLLECTION_NAME + "_new"
-    if remote_client.collection_exists(new_collection_name):
-        remote_client.delete_collection(new_collection_name)
-    remote_client.create_collection(
-        new_collection_name, vectors_config=vector_params, init_from=COLLECTION_NAME
-    )
-
-    if local_client.collection_exists(new_collection_name):
-        local_client.delete_collection(new_collection_name)
-    local_client.create_collection(
-        new_collection_name, vectors_config=vector_params, init_from=COLLECTION_NAME
-    )
-
-    # init_from is performed asynchronously, so we need to retry
-    wait_for(
-        compare_collections,
-        remote_client,
-        local_client,
-        len(points),
-        collection_name=new_collection_name,
-    )
-
-    # try with models.InitFrom
-    if remote_client.collection_exists(new_collection_name):
-        remote_client.delete_collection(new_collection_name)
-    remote_client.create_collection(
-        new_collection_name,
-        vectors_config=vector_params,
-        init_from=models.InitFrom(collection=COLLECTION_NAME),
-    )
-    if local_client.collection_exists(new_collection_name):
-        local_client.delete_collection(new_collection_name)
-    local_client.create_collection(
-        new_collection_name,
-        vectors_config=vector_params,
-        init_from=models.InitFrom(collection=COLLECTION_NAME),
-    )
-
-    # init_from is performed asynchronously, so we need to retry
-    wait_for(
-        compare_collections,
-        remote_client,
-        local_client,
-        len(points),
-        collection_name=new_collection_name,
-    )
-
-
 def test_config_variations():
     def check_variation(vectors_config, sparse_vectors_config):
         if remote_client.collection_exists(COLLECTION_NAME):
diff --git a/tests/congruence_tests/test_delete_points.py b/tests/congruence_tests/test_delete_points.py
index 9a7b484b..341f900a 100644
--- a/tests/congruence_tests/test_delete_points.py
+++ b/tests/congruence_tests/test_delete_points.py
@@ -1,4 +1,3 @@
-from qdrant_client.http.models import NamedSparseVector, NamedVector
 from tests.congruence_tests.test_common import (
     COLLECTION_NAME,
     compare_client_results,
@@ -21,14 +20,14 @@ def test_delete_points(local_client, remote_client):
     compare_client_results(
         local_client,
         remote_client,
-        lambda c: c.search(COLLECTION_NAME, query_vector=NamedVector(name="image", vector=vector)),
+        lambda c: c.query_points(COLLECTION_NAME, query=vector, using="image").points,
     )
 
     found_ids = [
         scored_point.id
-        for scored_point in local_client.search(
-            COLLECTION_NAME, query_vector=NamedVector(name="image", vector=vector)
-        )
+        for scored_point in local_client.query_points(
+            COLLECTION_NAME, query=vector, using="image"
+        ).points
     ]
 
     local_client.delete(COLLECTION_NAME, found_ids)
@@ -39,10 +38,10 @@ def test_delete_points(local_client, remote_client):
     compare_client_results(
         local_client,
         remote_client,
-        lambda c: c.search(COLLECTION_NAME, query_vector=NamedVector(name="image", vector=vector)),
+        lambda c: c.query_points(COLLECTION_NAME, query=vector, using="image").points,
     )
 
-    #delete non-existent points
+    # delete non-existent points
     local_client.delete(COLLECTION_NAME, found_ids)
     remote_client.delete(COLLECTION_NAME, found_ids)
 
@@ -51,7 +50,7 @@ def test_delete_points(local_client, remote_client):
     compare_client_results(
         local_client,
         remote_client,
-        lambda c: c.search(COLLECTION_NAME, query_vector=NamedVector(name="image", vector=vector)),
+        lambda c: c.query_points(COLLECTION_NAME, query=vector, using="image").points,
     )
 
 
@@ -71,16 +70,18 @@ def test_delete_sparse_points():
     compare_client_results(
         local_client,
         remote_client,
-        lambda c: c.search(
-            COLLECTION_NAME, query_vector=NamedSparseVector(name="sparse-image", vector=vector)
-        ),
+        lambda c: c.query_points(
+            COLLECTION_NAME,
+            query=vector,
+            using="sparse-image",
+        ).points,
     )
 
     found_ids = [
         scored_point.id
-        for scored_point in local_client.search(
-            COLLECTION_NAME, query_vector=NamedSparseVector(name="sparse-image", vector=vector)
-        )
+        for scored_point in local_client.query_points(
+            COLLECTION_NAME, query=vector, using="sparse-image"
+        ).points
     ]
 
     local_client.delete(COLLECTION_NAME, found_ids)
@@ -91,7 +92,5 @@ def test_delete_sparse_points():
     compare_client_results(
         local_client,
         remote_client,
-        lambda c: c.search(
-            COLLECTION_NAME, query_vector=NamedSparseVector(name="sparse-image", vector=vector)
-        ),
+        lambda c: c.query_points(COLLECTION_NAME, query=vector, using="sparse-image").points,
     )
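Note: the tests above show the migration rule this changeset applies throughout: search(query_vector=NamedVector(name=..., vector=...)) becomes query_points(query=..., using=...), and the hits move from the return value itself to its .points attribute. Roughly:

    # before (API removed in this changeset):
    # hits = client.search(
    #     COLLECTION_NAME, query_vector=models.NamedVector(name="image", vector=vector)
    # )

    # after:
    hits = client.query_points(COLLECTION_NAME, query=vector, using="image").points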
diff --git a/tests/congruence_tests/test_discovery.py b/tests/congruence_tests/test_discovery.py
index f3803dac..3014d5fb 100644
--- a/tests/congruence_tests/test_discovery.py
+++ b/tests/congruence_tests/test_discovery.py
@@ -62,13 +62,14 @@ def test_context_cosine(
     grpc_client,
 ):
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        # test single context pair
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            context=[models.ContextExamplePair(positive=10, negative=19)],
+            query=models.ContextQuery(context=models.ContextPair(positive=10, negative=19)),
             with_payload=True,
             limit=1000,
             using="text",
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f, is_context_search=True)
     compare_client_results(local_client, http_client, f, is_context_search=True)
@@ -80,13 +81,14 @@ def test_context_dot(
     grpc_client,
 ):
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        # test list context pair
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            context=[models.ContextExamplePair(positive=10, negative=19)],
+            query=models.ContextQuery(context=models.ContextPair(positive=10, negative=19)),
             with_payload=True,
             limit=1000,
             using="image",
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f, is_context_search=True)
     compare_client_results(local_client, http_client, f, is_context_search=True)
@@ -98,13 +100,13 @@ def test_context_euclidean(
     grpc_client,
 ):
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            context=[models.ContextExamplePair(positive=11, negative=19)],
+            query=models.ContextQuery(context=models.ContextPair(positive=11, negative=19)),
             with_payload=True,
             limit=1000,
             using="code",
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f, is_context_search=True)
     compare_client_results(local_client, http_client, f, is_context_search=True)
@@ -119,21 +121,23 @@ def test_context_many_pairs(
     random_image_vector_2 = random_vector(image_vector_size)
 
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            context=[
-                models.ContextExamplePair(positive=11, negative=19),
-                models.ContextExamplePair(positive=400, negative=200),
-                models.ContextExamplePair(
-                    positive=random_image_vector_1, negative=random_image_vector_2
-                ),
-                models.ContextExamplePair(positive=30, negative=random_image_vector_2),
-                models.ContextExamplePair(positive=random_image_vector_1, negative=15),
-            ],
+            query=models.ContextQuery(
+                context=[
+                    models.ContextPair(positive=11, negative=19),
+                    models.ContextPair(positive=400, negative=200),
+                    models.ContextPair(
+                        positive=random_image_vector_1, negative=random_image_vector_2
+                    ),
+                    models.ContextPair(positive=30, negative=random_image_vector_2),
+                    models.ContextPair(positive=random_image_vector_1, negative=15),
+                ]
+            ),
             with_payload=True,
             limit=1000,
             using="image",
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f, is_context_search=True)
     compare_client_results(local_client, http_client, f, is_context_search=True)
@@ -145,14 +149,19 @@ def test_discover_cosine(
     grpc_client,
 ):
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        # test single context pair
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            target=10,
-            context=[models.ContextExamplePair(positive=11, negative=19)],
+            query=models.DiscoverQuery(
+                discover=models.DiscoverInput(
+                    target=10,
+                    context=models.ContextPair(positive=11, negative=19),
+                )
+            ),
             with_payload=True,
             limit=10,
             using="text",
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f)
     compare_client_results(local_client, http_client, f)
@@ -164,14 +173,18 @@ def test_discover_dot(
     grpc_client,
 ):
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        # test list context pair
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            target=10,
-            context=[models.ContextExamplePair(positive=11, negative=19)],
+            query=models.DiscoverQuery(
+                discover=models.DiscoverInput(
+                    target=10, context=[models.ContextPair(positive=11, negative=19)]
+                )
+            ),
             with_payload=True,
             limit=10,
             using="image",
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f)
     compare_client_results(local_client, http_client, f)
@@ -183,14 +196,17 @@ def test_discover_euclidean(
     grpc_client,
 ):
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            target=10,
-            context=[models.ContextExamplePair(positive=11, negative=19)],
+            query=models.DiscoverQuery(
+                discover=models.DiscoverInput(
+                    target=10, context=[models.ContextPair(positive=11, negative=19)]
+                )
+            ),
             with_payload=True,
             limit=10,
             using="code",
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f)
     compare_client_results(local_client, http_client, f)
@@ -204,13 +220,17 @@ def test_discover_raw_target(
     random_image_vector = random_vector(image_vector_size)
 
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            target=random_image_vector,
-            context=[models.ContextExamplePair(positive=10, negative=19)],
+            query=models.DiscoverQuery(
+                discover=models.DiscoverInput(
+                    target=random_image_vector,
+                    context=[models.ContextPair(positive=10, negative=19)],
+                )
+            ),
             limit=10,
             using="image",
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f)
     compare_client_results(local_client, http_client, f)
@@ -224,13 +244,17 @@ def test_context_raw_positive(
     random_image_vector = random_vector(image_vector_size)
 
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            target=10,
-            context=[models.ContextExamplePair(positive=random_image_vector, negative=19)],
+            query=models.DiscoverQuery(
+                discover=models.DiscoverInput(
+                    target=10,
+                    context=[models.ContextPair(positive=random_image_vector, negative=19)],
+                )
+            ),
             limit=10,
             using="image",
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f)
     compare_client_results(local_client, http_client, f)
@@ -242,13 +266,13 @@ def test_only_target(
     grpc_client,
 ):
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            target=10,
+            query=models.DiscoverQuery(discover=models.DiscoverInput(target=10, context=[])),
             with_payload=True,
             limit=10,
             using="image",
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f)
     compare_client_results(local_client, http_client, f)
@@ -261,12 +285,16 @@ def discover_from_another_collection(
     positive_point_id: Optional[int] = None,
     **kwargs: dict[str, Any],
 ) -> list[models.ScoredPoint]:
-    return client.discover(
+    return client.query_points(
         collection_name=collection_name,
-        target=5,
-        context=[models.ContextExamplePair(positive=positive_point_id, negative=6)]
-        if positive_point_id is not None
-        else [],
+        query=models.DiscoverQuery(
+            discover=models.DiscoverInput(
+                target=5,
+                context=[models.ContextPair(positive=positive_point_id, negative=6)]
+                if positive_point_id is not None
+                else [],
+            )
+        ),
         with_payload=True,
         limit=10,
         using="image",
@@ -274,7 +302,7 @@ def discover_from_another_collection(
             collection=lookup_collection_name,
             vector="image",
         ),
-    )
+    ).points
 
 
 def test_discover_from_another_collection(
@@ -317,19 +345,26 @@ def test_discover_batch(
     http_client,
     grpc_client,
 ):
-    def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[list[models.ScoredPoint]]:
-        return client.discover_batch(
+    def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.QueryResponse]:
+        return client.query_batch_points(
             collection_name=COLLECTION_NAME,
             requests=[
-                models.DiscoverRequest(
-                    target=10,
-                    context=[models.ContextExamplePair(positive=15, negative=7)],
+                models.QueryRequest(
+                    query=models.DiscoverQuery(
+                        discover=models.DiscoverInput(
+                            target=10,
+                            context=[models.ContextPair(positive=15, negative=7)],
+                        )
+                    ),
                     limit=5,
                     using="image",
                 ),
-                models.DiscoverRequest(
-                    target=11,
-                    context=[models.ContextExamplePair(positive=15, negative=17)],
+                models.QueryRequest(
+                    query=models.DiscoverQuery(
+                        discover=models.DiscoverInput(
+                            target=11, context=[models.ContextPair(positive=15, negative=17)]
+                        )
+                    ),
                     limit=6,
                     using="image",
                     lookup_from=models.LookupLocation(
@@ -347,26 +382,32 @@ def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[list[models.ScoredPo
 
 @pytest.mark.parametrize("filter", [one_random_filter_please() for _ in range(10)])
 def test_discover_with_filters(local_client, http_client, grpc_client, filter: models.Filter):
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            target=10,
-            context=[models.ContextExamplePair(positive=15, negative=7)],
+            query=models.DiscoverQuery(
+                discover=models.DiscoverInput(
+                    target=10, context=[models.ContextPair(positive=15, negative=7)]
+                )
+            ),
             limit=15,
             using="image",
             query_filter=filter,
-        )
+        ).points
+
+    compare_client_results(grpc_client, http_client, f)
+    compare_client_results(local_client, http_client, f)
 
 
 @pytest.mark.parametrize("filter", [one_random_filter_please() for _ in range(10)])
 def test_context_with_filters(local_client, http_client, grpc_client, filter: models.Filter):
     def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]:
-        return client.discover(
+        return client.query_points(
             collection_name=COLLECTION_NAME,
-            context=[models.ContextExamplePair(positive=15, negative=7)],
+            query=models.ContextQuery(context=models.ContextPair(positive=15, negative=7)),
             limit=1000,
             using="image",
             query_filter=filter,
-        )
+        ).points
 
     compare_client_results(grpc_client, http_client, f, is_context_search=True)
     compare_client_results(local_client, http_client, f, is_context_search=True)
@@ -386,38 +427,38 @@ def test_query_with_nan():
     init_client(remote_client, fixture_points)
 
     with pytest.raises(AssertionError):
-        local_client.discover(
+        local_client.query_points(
            collection_name=COLLECTION_NAME,
-            target=vector,
+            query=models.DiscoverQuery(discover=models.DiscoverInput(target=vector, context=[])),
             using=using,
         )
     with pytest.raises(UnexpectedResponse):
-        remote_client.discover(
+        remote_client.query_points(
             collection_name=COLLECTION_NAME,
-            target=vector,
+            query=models.DiscoverQuery(discover=models.DiscoverInput(target=vector, context=[])),
             using=using,
         )
 
     with pytest.raises(AssertionError):
-        local_client.discover(
+        local_client.query_points(
             collection_name=COLLECTION_NAME,
-            context=[models.ContextExamplePair(positive=vector, negative=1)],
+            query=models.ContextQuery(context=models.ContextPair(positive=vector, negative=1)),
             using=using,
         )
     with pytest.raises(UnexpectedResponse):
-        remote_client.discover(
+        remote_client.query_points(
             collection_name=COLLECTION_NAME,
-            context=[models.ContextExamplePair(positive=vector, negative=1)],
+            query=models.ContextQuery(context=models.ContextPair(positive=vector, negative=1)),
             using=using,
         )
 
     with pytest.raises(AssertionError):
-        local_client.discover(
+        local_client.query_points(
             collection_name=COLLECTION_NAME,
-            context=[models.ContextExamplePair(positive=1, negative=vector)],
+            query=models.ContextQuery(context=models.ContextPair(positive=1, negative=vector)),
             using=using,
         )
    with pytest.raises(UnexpectedResponse):
-        remote_client.discover(
+        remote_client.query_points(
             collection_name=COLLECTION_NAME,
-            context=[models.ContextExamplePair(positive=1, negative=vector)],
+            query=models.ContextQuery(context=models.ContextPair(positive=1, negative=vector)),
             using=using,
         )
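Note: discovery follows the same scheme as plain search. The removed discover(target=..., context=[...]) call shape maps onto a DiscoverQuery wrapping a DiscoverInput, while a bare context list maps onto a ContextQuery; for example:

    points = client.query_points(
        collection_name=COLLECTION_NAME,
        query=models.DiscoverQuery(
            discover=models.DiscoverInput(
                target=10,
                context=[models.ContextPair(positive=11, negative=19)],
            )
        ),
        using="image",
        limit=10,
    ).points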
"rand_number"]), limit=10, using="image", - strategy=models.RecommendStrategy.BEST_SCORE, group_by=self.group_by, group_size=self.group_size, search_params=models.SearchParams(exact=True), ) def many_recommend_groups(self, client: QdrantBase) -> models.GroupsResult: - return client.recommend_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - positive=[10, 19], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 19], strategy=models.RecommendStrategy.SUM_SCORES + ) + ), with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), limit=10, using="image", @@ -60,10 +69,11 @@ def many_recommend_groups(self, client: QdrantBase) -> models.GroupsResult: ) def simple_recommend_groups_negative(self, client: QdrantBase) -> models.GroupsResult: - return client.recommend_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - positive=[10], - negative=[15, 7], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[10], negative=[15, 7]) + ), with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), limit=10, using="image", @@ -73,10 +83,11 @@ def simple_recommend_groups_negative(self, client: QdrantBase) -> models.GroupsR ) def recommend_groups_from_another_collection(self, client: QdrantBase) -> models.GroupsResult: - return client.recommend_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - positive=[10], - negative=[15, 7], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[10], negative=[15, 7]) + ), with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), limit=10, using="image", @@ -92,9 +103,9 @@ def recommend_groups_from_another_collection(self, client: QdrantBase) -> models def filter_recommend_groups_text( self, client: QdrantBase, query_filter: models.Filter ) -> models.GroupsResult: - return client.recommend_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - positive=[10], + query=models.RecommendQuery(recommend=models.RecommendInput(positive=[10])), query_filter=query_filter, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), limit=10, diff --git a/tests/congruence_tests/test_group_search.py b/tests/congruence_tests/test_group_search.py index dbe01f1c..7c08611a 100644 --- a/tests/congruence_tests/test_group_search.py +++ b/tests/congruence_tests/test_group_search.py @@ -43,9 +43,13 @@ def group_search( types.NamedVector, ], ) -> models.GroupsResult: - return client.search_groups( + using = None + if isinstance(query_vector, tuple): + using, query_vector = query_vector + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=query_vector, + query=query_vector, + using=using, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), group_by=self.group_by, limit=self.limit, @@ -53,9 +57,10 @@ def group_search( ) def group_search_text(self, client: QdrantBase) -> models.GroupsResult: - return client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), group_by=self.group_by, limit=self.limit, @@ -63,9 +68,9 @@ def group_search_text(self, client: QdrantBase) -> models.GroupsResult: ) def group_search_text_single(self, client: QdrantBase) -> models.GroupsResult: - return 
client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=self.query_text, + query=self.query_text, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), group_by=self.group_by, limit=self.limit, @@ -73,9 +78,10 @@ def group_search_text_single(self, client: QdrantBase) -> models.GroupsResult: ) def group_search_image(self, client: QdrantBase) -> models.GroupsResult: - return client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("image", self.query_image), + using="image", + query=self.query_image, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), group_by=self.group_by, limit=self.limit, @@ -83,9 +89,10 @@ def group_search_image(self, client: QdrantBase) -> models.GroupsResult: ) def group_search_image_with_lookup(self, client: QdrantBase) -> models.GroupsResult: - return client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("image", self.query_image), + query=self.query_image, + using="image", with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), group_by=self.group_by, limit=self.limit, @@ -94,9 +101,10 @@ def group_search_image_with_lookup(self, client: QdrantBase) -> models.GroupsRes ) def group_search_image_with_lookup_2(self, client: QdrantBase) -> models.GroupsResult: - return client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("image", self.query_image), + using="image", + query=self.query_image, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), group_by=self.group_by, limit=self.limit, @@ -109,9 +117,10 @@ def group_search_image_with_lookup_2(self, client: QdrantBase) -> models.GroupsR ) def group_search_code(self, client: QdrantBase) -> models.GroupsResult: - return client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("code", self.query_code), + using="code", + query=self.query_code, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), group_by=self.group_by, limit=self.limit, @@ -119,9 +128,10 @@ def group_search_code(self, client: QdrantBase) -> models.GroupsResult: ) def group_search_score_threshold(self, client: QdrantBase) -> models.GroupsResult: - res1 = client.search_groups( + res1 = client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), limit=self.limit, group_by=self.group_by, @@ -129,9 +139,10 @@ def group_search_score_threshold(self, client: QdrantBase) -> models.GroupsResul group_size=self.group_size, ) - res2 = client.search_groups( + res2 = client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), limit=self.limit, group_by=self.group_by, @@ -139,9 +150,10 @@ def group_search_score_threshold(self, client: QdrantBase) -> models.GroupsResul group_size=self.group_size, ) - res3 = client.search_groups( + res3 = client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), 
limit=self.limit, group_by=self.group_by, @@ -152,9 +164,10 @@ def group_search_score_threshold(self, client: QdrantBase) -> models.GroupsResul return models.GroupsResult(groups=res1.groups + res2.groups + res3.groups) def group_search_text_select_payload(self, client: QdrantBase) -> models.GroupsResult: - return client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, with_payload=["text_array", "nested.id"], limit=self.limit, group_by=self.group_by, @@ -162,9 +175,10 @@ def group_search_text_select_payload(self, client: QdrantBase) -> models.GroupsR ) def group_search_payload_exclude(self, client: QdrantBase) -> models.GroupsResult: - return client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, with_payload=models.PayloadSelectorExclude( exclude=["text_array", "nested.id", "city.geo", "rand_number"] ), @@ -174,9 +188,10 @@ def group_search_payload_exclude(self, client: QdrantBase) -> models.GroupsResul ) def group_search_image_select_vector(self, client: QdrantBase) -> models.GroupsResult: - return client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("image", self.query_image), + using="image", + query=self.query_image, with_payload=False, with_vectors=["image", "code"], limit=self.limit, @@ -187,9 +202,10 @@ def group_search_image_select_vector(self, client: QdrantBase) -> models.GroupsR def filter_group_search_text( self, client: QdrantBase, query_filter: models.Filter ) -> models.GroupsResult: - return client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, query_filter=query_filter, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), limit=self.limit, @@ -200,9 +216,9 @@ def filter_group_search_text( def filter_group_search_text_single( self, client: QdrantBase, query_filter: models.Filter ) -> models.GroupsResult: - return client.search_groups( + return client.query_points_groups( collection_name=COLLECTION_NAME, - query_vector=self.query_text, + query=self.query_text, query_filter=query_filter, with_payload=models.PayloadSelectorExclude(exclude=["city.geo", "rand_number"]), with_vectors=True, @@ -244,29 +260,6 @@ def test_group_search_types(): delete_fixture_collection(local_client) delete_fixture_collection(remote_client) - fixture_points = generate_fixtures() - init_client(local_client, fixture_points) - init_client(remote_client, fixture_points) - - query_vector_tuple = ("text", query_vector_list) - compare_client_results( - local_client, - remote_client, - searcher.group_search, - query_vector=query_vector_tuple, - ) - - query_named_vector = types.NamedVector(name="text", vector=query_vector_list) - compare_client_results( - local_client, - remote_client, - searcher.group_search, - query_vector=query_named_vector, - ) - - delete_fixture_collection(local_client) - delete_fixture_collection(remote_client) - def test_simple_group_search(): fixture_points = generate_fixtures() diff --git a/tests/congruence_tests/test_multivector_search_queries.py b/tests/congruence_tests/test_multivector_search_queries.py index 27972c23..78bef24b 100644 --- a/tests/congruence_tests/test_multivector_search_queries.py +++ 
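Note: for grouped search the rewrite is equally mechanical: search_groups(query_vector=("text", vec), ...) becomes query_points_groups(query=vec, using="text", ...), with group_by and group_size unchanged. A sketch (the group_by key below is a hypothetical payload field):

    groups = client.query_points_groups(
        collection_name=COLLECTION_NAME,
        query=vec,
        using="text",
        group_by="category",  # hypothetical payload key
        limit=4,
        group_size=2,
    ).groups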
b/tests/congruence_tests/test_multivector_search_queries.py @@ -236,10 +236,14 @@ def test_search_invalid_vector_type(): vector_invalid_type = {"multi-text": [1, 2, 3, 4]} with pytest.raises(ValueError): - local_client.search(collection_name=COLLECTION_NAME, query_vector=vector_invalid_type) + local_client.query_points( + collection_name=COLLECTION_NAME, query=vector_invalid_type, using="multi-text" + ) with pytest.raises(ValueError): - remote_client.search(collection_name=COLLECTION_NAME, query_vector=vector_invalid_type) + remote_client.query_points( + collection_name=COLLECTION_NAME, query=vector_invalid_type, using="multi-text" + ) def test_query_with_nan(): diff --git a/tests/congruence_tests/test_query.py b/tests/congruence_tests/test_query.py index 40974ee3..41c0d74f 100644 --- a/tests/congruence_tests/test_query.py +++ b/tests/congruence_tests/test_query.py @@ -363,6 +363,20 @@ def dense_query_rrf(self, client: QdrantBase) -> models.QueryResponse: limit=10, ) + def dense_query_parametrized_rrf(self, client: QdrantBase) -> models.QueryResponse: + return client.query_points( + collection_name=COLLECTION_NAME, + prefetch=[ + models.Prefetch( + query=self.dense_vector_query_text, + using="text", + ) + ], + query=models.RrfQuery(rrf=models.Rrf(k=10)), + with_payload=True, + limit=10, + ) + def dense_query_rrf_plain_prefetch(self, client: QdrantBase) -> models.QueryResponse: # dense_query_rrf has a list of prefetches, here we have just a prefetch return client.query_points( @@ -1276,6 +1290,10 @@ def test_dense_query_fusion(): local_client, http_client, grpc_client, searcher.deep_dense_queries_dbsf ) + compare_clients_results( + local_client, http_client, grpc_client, searcher.dense_query_parametrized_rrf + ) + def test_dense_query_discovery_context(): n_vectors = 250 diff --git a/tests/congruence_tests/test_recommendation.py b/tests/congruence_tests/test_recommendation.py index 2ad38857..b8535e39 100644 --- a/tests/congruence_tests/test_recommendation.py +++ b/tests/congruence_tests/test_recommendation.py @@ -28,44 +28,50 @@ def __init__(self): @classmethod def simple_recommend_image(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[10], negative=[]) + ), with_payload=True, limit=10, using="image", - ) + ).points @classmethod def many_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10, 19], + query=models.RecommendQuery(recommend=models.RecommendInput(positive=[10, 19])), with_payload=True, limit=10, using="image", - ) + ).points @classmethod def simple_recommend_negative(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10], - negative=[15, 7], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[10], negative=[15, 7]) + ), with_payload=True, limit=10, using="image", - ) + ).points @classmethod def recommend_from_another_collection( cls, client: QdrantBase, positive_point_id: Optional[int] = None ) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10] if positive_point_id is None else [positive_point_id], - negative=[15, 7] if positive_point_id is None else [], + 
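
The new `dense_query_parametrized_rrf` in test_query.py fuses prefetch results with an RRF query that carries an explicit `k`, instead of the plain `models.FusionQuery(fusion=models.Fusion.RRF)`. A sketch under the assumption that `models.RrfQuery`/`models.Rrf` ship with the client version this diff targets; the collection layout and vectors are invented for illustration:

from qdrant_client import QdrantClient, models

client = QdrantClient(":memory:")
client.create_collection(
    "demo",
    vectors_config={
        "text": models.VectorParams(size=4, distance=models.Distance.COSINE),
        "image": models.VectorParams(size=4, distance=models.Distance.DOT),
    },
)
client.upsert(
    "demo",
    points=[
        models.PointStruct(
            id=i,
            vector={"text": [0.1 * (i + 1), 0.5, 0.4, 0.3], "image": [0.2, 0.1 * i, 0.3, 0.4]},
        )
        for i in range(5)
    ],
)

# Rank lists from both prefetch branches are fused with Reciprocal Rank Fusion;
# `k` controls how steeply lower-ranked hits are discounted.
response = client.query_points(
    "demo",
    prefetch=[
        models.Prefetch(query=[0.2, 0.1, 0.9, 0.7], using="text"),
        models.Prefetch(query=[0.4, 0.3, 0.2, 0.1], using="image"),
    ],
    query=models.RrfQuery(rrf=models.Rrf(k=10)),
    limit=3,
)
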
query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10] if positive_point_id is None else [positive_point_id], + negative=[15, 7] if positive_point_id is None else [], + ) + ), with_payload=True, limit=10, using="image", @@ -73,194 +79,224 @@ def recommend_from_another_collection( collection=secondary_collection_name, vector="image", ), - ) + ).points @classmethod def filter_recommend_text( cls, client: QdrantBase, query_filter: models.Filter ) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10], + query=models.RecommendQuery(recommend=models.RecommendInput(positive=[10])), query_filter=query_filter, with_payload=True, limit=10, using="text", - ) + ).points @classmethod def best_score_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[ - 10, - 20, - ], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 20], + negative=[], + strategy=models.RecommendStrategy.BEST_SCORE, + ) + ), with_payload=True, limit=10, using="image", - strategy=models.RecommendStrategy.BEST_SCORE, - ) + ).points @classmethod def best_score_recommend_euclid(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[ - 10, - 20, - ], - negative=[11, 21], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 20], + negative=[11, 21], + strategy=models.RecommendStrategy.BEST_SCORE, + ) + ), with_payload=True, limit=10, using="code", - strategy=models.RecommendStrategy.BEST_SCORE, - ) + ).points @classmethod def only_negatives_best_score_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=None, - negative=[10, 12], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=None, negative=[10, 12], strategy=models.RecommendStrategy.BEST_SCORE + ) + ), with_payload=True, limit=10, using="image", - strategy=models.RecommendStrategy.BEST_SCORE, - ) + ).points @classmethod def only_negatives_best_score_recommend_euclid( cls, client: QdrantBase ) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=None, - negative=[10, 12], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=None, + negative=[10, 12], + strategy="best_score", # type: ignore # check it works with a literal + ) + ), with_payload=True, limit=10, using="code", - strategy="best_score", # type: ignore # check it works with a literal - ) + ).points @classmethod def sum_scores_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[ - 10, - 20, - ], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 20], negative=[], strategy=models.RecommendStrategy.SUM_SCORES + ) + ), with_payload=True, limit=10, using="image", - strategy=models.RecommendStrategy.SUM_SCORES, - ) + ).points @classmethod def sum_scores_recommend_euclid(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[ - 10, - 20, - ], - 
negative=[11, 21], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 20], + negative=[11, 21], + strategy=models.RecommendStrategy.SUM_SCORES, + ) + ), with_payload=True, limit=10, using="code", - strategy=models.RecommendStrategy.SUM_SCORES, - ) + ).points @classmethod def only_negatives_sum_scores_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=None, - negative=[10, 12], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=None, negative=[10, 12], strategy=models.RecommendStrategy.SUM_SCORES + ) + ), with_payload=True, limit=10, using="image", - strategy=models.RecommendStrategy.SUM_SCORES, - ) + ).points @classmethod def only_negatives_sum_scores_recommend_euclid( cls, client: QdrantBase ) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=None, - negative=[10, 12], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=None, negative=[10, 12], strategy="sum_scores" + ) # type: ignore # check it works with a literal + ), with_payload=True, limit=10, using="code", - strategy="sum_scores", # type: ignore # check it works with a literal - ) + ).points @classmethod def avg_vector_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10, 13], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 13], + negative=[], + strategy=models.RecommendStrategy.AVERAGE_VECTOR, + ) + ), with_payload=True, limit=10, using="image", - strategy=models.RecommendStrategy.AVERAGE_VECTOR, - ) + ).points def recommend_from_raw_vectors(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[self.query_image], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[self.query_image], negative=[]) + ), with_payload=True, limit=10, using="image", - ) + ).points def recommend_from_raw_vectors_and_ids(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[self.query_image, 10], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[self.query_image, 10], negative=[]), + ), with_payload=True, limit=10, using="image", - ) + ).points @staticmethod - def recommend_batch(client: QdrantBase) -> list[list[models.ScoredPoint]]: - return client.recommend_batch( + def recommend_batch(client: QdrantBase) -> list[models.QueryResponse]: + return client.query_batch_points( collection_name=COLLECTION_NAME, requests=[ - models.RecommendRequest( - positive=[3], - negative=[], + models.QueryRequest( + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[3], + negative=None, + strategy=models.RecommendStrategy.AVERAGE_VECTOR, + ) + ), limit=1, using="image", - strategy=models.RecommendStrategy.AVERAGE_VECTOR, ), - models.RecommendRequest( - positive=[10], - negative=[], + models.QueryRequest( + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10], + negative=[], + strategy=models.RecommendStrategy.BEST_SCORE, + ) + ), limit=2, using="image", - strategy=models.RecommendStrategy.BEST_SCORE, 
lookup_from=models.LookupLocation( collection=secondary_collection_name, vector="image", ), ), - models.RecommendRequest( - positive=[4], - negative=[], + models.QueryRequest( + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[4], + negative=[], + strategy=models.RecommendStrategy.SUM_SCORES, + ) + ), limit=2, using="image", - strategy=models.RecommendStrategy.SUM_SCORES, ), ], ) @@ -359,33 +395,37 @@ def test_query_with_nan(): init_client(remote_client, fixture_points) with pytest.raises(AssertionError): - local_client.recommend( + local_client.query_points( collection_name=COLLECTION_NAME, - positive=[vector], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[vector], negative=[]) + ), using=using, ) with pytest.raises(UnexpectedResponse): - remote_client.recommend( + remote_client.query_points( collection_name=COLLECTION_NAME, - positive=[vector], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[vector], negative=[]) + ), using=using, ) with pytest.raises(AssertionError): - local_client.recommend( + local_client.query_points( collection_name=COLLECTION_NAME, - positive=[1], - negative=[vector], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[1], negative=[vector]), + ), using=using, ) with pytest.raises(UnexpectedResponse): - remote_client.recommend( + remote_client.query_points( collection_name=COLLECTION_NAME, - positive=[1], - negative=[vector], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[1], negative=[vector]), + ), using=using, ) diff --git a/tests/congruence_tests/test_search.py b/tests/congruence_tests/test_search.py index 28213a93..4ab9f23d 100644 --- a/tests/congruence_tests/test_search.py +++ b/tests/congruence_tests/test_search.py @@ -27,122 +27,134 @@ def __init__(self): self.query_code = np.random.random(code_vector_size).tolist() def simple_search_text(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + query=self.query_text, + using="text", with_payload=True, limit=10, - ) + ).points def simple_search_image(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=("image", self.query_image), + using="image", + query=self.query_image, with_payload=True, limit=10, - ) + ).points def simple_search_code(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=("code", self.query_code), + query=self.query_code, + using="code", with_payload=True, limit=10, - ) + ).points def simple_search_text_offset(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, with_payload=True, limit=10, offset=10, - ) + ).points def simple_search_text_with_vector(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, with_payload=True, with_vectors=True, limit=10, offset=10, - ) + ).points def search_score_threshold(self, client: QdrantBase) -> list[models.ScoredPoint]: - 
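
The recommendation tests follow the same pattern: `recommend(positive=..., negative=..., strategy=...)` becomes `query_points` with a `models.RecommendQuery`, the strategy moving inside `RecommendInput`, and `recommend_batch` becomes `query_batch_points` over `models.QueryRequest` objects. A condensed sketch; the collection name and point ids are placeholders:

from qdrant_client import QdrantClient, models

client = QdrantClient(":memory:")
client.create_collection(
    "demo",
    vectors_config={"image": models.VectorParams(size=4, distance=models.Distance.DOT)},
)
client.upsert(
    "demo",
    points=[
        models.PointStruct(id=i, vector={"image": [0.1 * (i + 1), 0.2, 0.3, 0.4]})
        for i in range(20)
    ],
)

# Old: client.recommend(..., positive=[10], negative=[15, 7], strategy=..., using="image")
points = client.query_points(
    "demo",
    query=models.RecommendQuery(
        recommend=models.RecommendInput(
            positive=[10],  # point ids or raw vectors
            negative=[15, 7],
            strategy=models.RecommendStrategy.BEST_SCORE,  # strategy now lives in the input
        )
    ),
    using="image",
    limit=5,
).points

# Old: client.recommend_batch(..., requests=[models.RecommendRequest(...)])
responses = client.query_batch_points(
    "demo",
    requests=[
        models.QueryRequest(
            query=models.RecommendQuery(recommend=models.RecommendInput(positive=[3])),
            using="image",
            limit=1,
        )
    ],
)
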
res1 = client.search( + res1 = client.query_points( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + query=self.query_text, + using="text", with_payload=True, limit=10, score_threshold=0.9, - ) + ).points - res2 = client.search( + res2 = client.query_points( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + query=self.query_text, + using="text", with_payload=True, limit=10, score_threshold=0.95, - ) + ).points - res3 = client.search( + res3 = client.query_points( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + query=self.query_text, + using="text", with_payload=True, limit=10, score_threshold=0.1, - ) + ).points return res1 + res2 + res3 def simple_search_text_select_payload(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, with_payload=["text_array", "nested.id"], limit=10, - ) + ).points def search_payload_exclude(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, with_payload=models.PayloadSelectorExclude(exclude=["text_array", "nested.id"]), limit=10, - ) + ).points def simple_search_image_select_vector(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=("image", self.query_image), + using="image", + query=self.query_image, with_payload=False, with_vectors=["image", "code"], limit=10, - ) + ).points def filter_search_text( self, client: QdrantBase, query_filter: models.Filter ) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=("text", self.query_text), + using="text", + query=self.query_text, query_filter=query_filter, with_payload=True, limit=10, - ) + ).points def filter_search_text_single( self, client: QdrantBase, query_filter: models.Filter ) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=self.query_text, + query=self.query_text, query_filter=query_filter, with_payload=True, with_vectors=True, limit=10, - ) + ).points def test_simple_search(): @@ -331,10 +343,10 @@ def test_search_invalid_vector_type(): vector_invalid_type = {"text": [1, 2, 3, 4]} with pytest.raises(ValueError): - local_client.search(collection_name=COLLECTION_NAME, query_vector=vector_invalid_type) + local_client.query_points(collection_name=COLLECTION_NAME, query=vector_invalid_type) with pytest.raises(ValueError): - remote_client.search(collection_name=COLLECTION_NAME, query_vector=vector_invalid_type) + remote_client.query_points(collection_name=COLLECTION_NAME, query=vector_invalid_type) def test_query_with_nan(): @@ -348,11 +360,12 @@ def test_query_with_nan(): vector = np.random.random(text_vector_size) vector[4] = np.nan - query_vector = ("text", vector.tolist()) + query_vector = vector.tolist() + with pytest.raises(AssertionError): - local_client.search(COLLECTION_NAME, query_vector) + local_client.query_points(COLLECTION_NAME, query_vector, using="text") with pytest.raises(UnexpectedResponse): - remote_client.search(COLLECTION_NAME, query_vector) + remote_client.query_points(COLLECTION_NAME, query_vector, 
using="text") single_vector_config = models.VectorParams( size=text_vector_size, distance=models.Distance.COSINE @@ -369,6 +382,6 @@ def test_query_with_nan(): init_client(remote_client, fixture_points, vectors_config=single_vector_config) with pytest.raises(AssertionError): - local_client.search(COLLECTION_NAME, vector.tolist()) + local_client.query_points(COLLECTION_NAME, vector.tolist()) with pytest.raises(UnexpectedResponse): - remote_client.search(COLLECTION_NAME, vector.tolist()) + remote_client.query_points(COLLECTION_NAME, vector.tolist()) diff --git a/tests/congruence_tests/test_sparse_discovery.py b/tests/congruence_tests/test_sparse_discovery.py index 88095fbc..d53ff163 100644 --- a/tests/congruence_tests/test_sparse_discovery.py +++ b/tests/congruence_tests/test_sparse_discovery.py @@ -6,7 +6,6 @@ from qdrant_client import QdrantClient, models from qdrant_client.client_base import QdrantBase from qdrant_client.http.exceptions import UnexpectedResponse -from qdrant_client.http.models import ContextExamplePair from tests.congruence_tests.test_common import ( COLLECTION_NAME, compare_client_results, @@ -76,9 +75,9 @@ def test_context( grpc_client, ): def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]: - return client.discover( + return client.query_points( collection_name=COLLECTION_NAME, - context=[models.ContextExamplePair(positive=10, negative=19)], + query=models.ContextQuery(context=[models.ContextPair(positive=10, negative=19)]), with_payload=True, limit=200, using="sparse-image", @@ -101,21 +100,24 @@ def test_context_many_pairs( )["sparse-image"] def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]: - return client.discover( + return client.query_points( collection_name=COLLECTION_NAME, - context=[ - models.ContextExamplePair(positive=11, negative=19), - models.ContextExamplePair(positive=100, negative=199), - models.ContextExamplePair( - positive=random_sparse_image_vector_1, negative=random_sparse_image_vector_2 - ), - models.ContextExamplePair(positive=30, negative=random_sparse_image_vector_2), - models.ContextExamplePair(positive=random_sparse_image_vector_1, negative=15), - ], + query=models.ContextQuery( + context=[ + models.ContextPair(positive=11, negative=19), + models.ContextPair(positive=100, negative=199), + models.ContextPair( + positive=random_sparse_image_vector_1, + negative=random_sparse_image_vector_2, + ), + models.ContextPair(positive=30, negative=random_sparse_image_vector_2), + models.ContextPair(positive=random_sparse_image_vector_1, negative=15), + ] + ), with_payload=True, limit=200, using="sparse-image", - ) + ).points compare_client_results(grpc_client, http_client, f, is_context_search=True) compare_client_results(local_client, http_client, f, is_context_search=True) @@ -127,14 +129,17 @@ def test_discover( grpc_client, ): def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]: - return client.discover( + return client.query_points( collection_name=COLLECTION_NAME, - target=10, - context=[models.ContextExamplePair(positive=11, negative=19)], + query=models.DiscoverQuery( + discover=models.DiscoverInput( + target=10, context=[models.ContextPair(positive=11, negative=19)] + ) + ), with_payload=True, limit=100, using="sparse-image", - ) + ).points compare_client_results(grpc_client, http_client, f) compare_client_results(local_client, http_client, f) @@ -150,13 +155,17 @@ def test_discover_raw_target( ] def f(client: QdrantBase, **kwargs: dict[str, Any]) -> 
list[models.ScoredPoint]: - return client.discover( + return client.query_points( collection_name=COLLECTION_NAME, - target=random_sparse_image_vector, - context=[models.ContextExamplePair(positive=10, negative=19)], + query=models.DiscoverQuery( + discover=models.DiscoverInput( + target=random_sparse_image_vector, + context=[models.ContextPair(positive=10, negative=19)], + ) + ), limit=100, using="sparse-image", - ) + ).points compare_client_results(grpc_client, http_client, f) compare_client_results(local_client, http_client, f) @@ -172,13 +181,17 @@ def test_context_raw_positive( ] def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]: - return client.discover( + return client.query_points( collection_name=COLLECTION_NAME, - target=10, - context=[models.ContextExamplePair(positive=random_sparse_image_vector, negative=19)], + query=models.DiscoverQuery( + discover=models.DiscoverInput( + target=10, + context=[models.ContextPair(positive=random_sparse_image_vector, negative=19)], + ) + ), limit=10, using="sparse-image", - ) + ).points compare_client_results(grpc_client, http_client, f) compare_client_results(local_client, http_client, f) @@ -190,13 +203,13 @@ def test_only_target( grpc_client, ): def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]: - return client.discover( + return client.query_points( collection_name=COLLECTION_NAME, - target=10, + query=models.DiscoverQuery(discover=models.DiscoverInput(target=10, context=[])), with_payload=True, limit=10, using="sparse-image", - ) + ).points compare_client_results(grpc_client, http_client, f) compare_client_results(local_client, http_client, f) @@ -208,10 +221,13 @@ def test_discover_from_another_collection( grpc_client, ): def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]: - return client.discover( + return client.query_points( collection_name=COLLECTION_NAME, - target=10, - context=[models.ContextExamplePair(positive=15, negative=7)], + query=models.DiscoverQuery( + discover=models.DiscoverInput( + target=10, context=[models.ContextPair(positive=15, negative=7)] + ) + ), with_payload=True, limit=10, using="sparse-image", @@ -219,7 +235,7 @@ def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]: collection=secondary_collection_name, vector="sparse-image", ), - ) + ).points compare_client_results(grpc_client, http_client, f) compare_client_results(local_client, http_client, f) @@ -230,19 +246,25 @@ def test_discover_batch( http_client, grpc_client, ): - def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[list[models.ScoredPoint]]: - return client.discover_batch( + def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.QueryResponse]: + return client.query_batch_points( collection_name=COLLECTION_NAME, requests=[ - models.DiscoverRequest( - target=10, - context=[models.ContextExamplePair(positive=15, negative=7)], + models.QueryRequest( + query=models.DiscoverQuery( + discover=models.DiscoverInput( + target=10, context=[models.ContextPair(positive=15, negative=7)] + ) + ), limit=5, using="sparse-image", ), - models.DiscoverRequest( - target=11, - context=[models.ContextExamplePair(positive=15, negative=17)], + models.QueryRequest( + query=models.DiscoverQuery( + discover=models.DiscoverInput( + target=11, context=[models.ContextPair(positive=15, negative=17)] + ) + ), limit=6, using="sparse-image", lookup_from=models.LookupLocation( @@ -260,26 +282,29 @@ def f(client: QdrantBase, **kwargs: dict[str, Any]) -> 
list[list[models.ScoredPo @pytest.mark.parametrize("filter_", [one_random_filter_please() for _ in range(10)]) def test_discover_with_filters(local_client, http_client, grpc_client, filter_: models.Filter): def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]: - return client.discover( + return client.query_points( collection_name=COLLECTION_NAME, - target=10, - context=[models.ContextExamplePair(positive=15, negative=7)], + query=models.DiscoverQuery( + discover=models.DiscoverInput( + target=10, context=[models.ContextPair(positive=15, negative=7)] + ) + ), limit=15, using="sparse-image", query_filter=filter_, - ) + ).points @pytest.mark.parametrize("filter_", [one_random_filter_please() for _ in range(10)]) def test_context_with_filters(local_client, http_client, grpc_client, filter_: models.Filter): def f(client: QdrantBase, **kwargs: dict[str, Any]) -> list[models.ScoredPoint]: - return client.discover( + return client.query_points( collection_name=COLLECTION_NAME, - context=[models.ContextExamplePair(positive=15, negative=7)], + query=models.ContextQuery(context=[models.ContextPair(positive=15, negative=7)]), limit=200, using="sparse-image", query_filter=filter_, - ) + ).points compare_client_results(grpc_client, http_client, f, is_context_search=True) compare_client_results(local_client, http_client, f, is_context_search=True) @@ -321,18 +346,24 @@ def test_query_with_nan(): [None, sparse_vector, sparse_vector_with_nan], [sparse_vector_with_nan, sparse_vector, sparse_vector_2], ): + if target is not None: + query = models.DiscoverQuery( + discover=models.DiscoverInput( + target=target, context=[models.ContextPair(positive=pos, negative=neg)] + ) + ) + else: + query = models.ContextQuery(context=models.ContextPair(positive=pos, negative=neg)) with pytest.raises(AssertionError): - local_client.discover( + local_client.query_points( collection_name=COLLECTION_NAME, - target=target, - context=[ContextExamplePair(positive=pos, negative=neg)], + query=query, using=using, ) with pytest.raises(UnexpectedResponse): - remote_client.discover( + remote_client.query_points( collection_name=COLLECTION_NAME, - target=target, - context=[ContextExamplePair(positive=pos, negative=neg)], + query=query, using=using, ) diff --git a/tests/congruence_tests/test_sparse_idf_search.py b/tests/congruence_tests/test_sparse_idf_search.py index a8656aa3..adb09d50 100644 --- a/tests/congruence_tests/test_sparse_idf_search.py +++ b/tests/congruence_tests/test_sparse_idf_search.py @@ -1,5 +1,4 @@ from qdrant_client.client_base import QdrantBase -from qdrant_client.conversions.common_types import NamedSparseVector from qdrant_client.http.models import models from tests.congruence_tests.test_common import ( COLLECTION_NAME, @@ -26,13 +25,14 @@ def __init__(self): self.query_text = generate_random_sparse_vector(sparse_text_vector_size, density=0.1) def simple_search_text(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-text", vector=self.query_text), + using="sparse-text", + query=self.query_text, with_payload=True, with_vectors=["sparse-text"], limit=10, - ) + ).points def test_simple_search(): diff --git a/tests/congruence_tests/test_sparse_recommend.py b/tests/congruence_tests/test_sparse_recommend.py index 10159608..1d660f7d 100644 --- a/tests/congruence_tests/test_sparse_recommend.py +++ b/tests/congruence_tests/test_sparse_recommend.py @@ -30,42 
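
In the discovery tests, `discover` splits into two query types: `models.DiscoverQuery` when a target is present and `models.ContextQuery` when only context pairs are given, with `ContextExamplePair` renamed to `ContextPair`; `discover_batch` goes through `query_batch_points` like the other batch calls. A sketch with placeholder data:

from qdrant_client import QdrantClient, models

client = QdrantClient(":memory:")
client.create_collection(
    "demo",
    vectors_config={"image": models.VectorParams(size=4, distance=models.Distance.DOT)},
)
client.upsert(
    "demo",
    points=[
        models.PointStruct(id=i, vector={"image": [0.1 * (i + 1), 0.2, 0.3, 0.4]})
        for i in range(20)
    ],
)

# Old: client.discover(..., target=10, context=[ContextExamplePair(positive=15, negative=7)])
points = client.query_points(
    "demo",
    query=models.DiscoverQuery(
        discover=models.DiscoverInput(
            target=10,
            context=[models.ContextPair(positive=15, negative=7)],
        )
    ),
    using="image",
    limit=5,
).points

# Old: client.discover(..., context=[...]) with no target
points = client.query_points(
    "demo",
    query=models.ContextQuery(context=[models.ContextPair(positive=15, negative=7)]),
    using="image",
    limit=5,
).points
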
+30,45 @@ def __init__(self): @classmethod def simple_recommend_image(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[10], negative=[]) + ), with_payload=True, limit=10, using="sparse-image", - ) + ).points @classmethod def many_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10, 19], + query=models.RecommendQuery(recommend=models.RecommendInput(positive=[10, 19])), with_payload=True, limit=10, using="sparse-image", - ) + ).points @classmethod def simple_recommend_negative(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10], - negative=[15, 7], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[10], negative=[15, 7]) + ), with_payload=True, limit=10, using="sparse-image", - ) + ).points @classmethod def recommend_from_another_collection(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10], - negative=[15, 7], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[10], negative=[15, 7]) + ), with_payload=True, limit=10, using="sparse-image", @@ -73,183 +76,205 @@ def recommend_from_another_collection(cls, client: QdrantBase) -> list[models.Sc collection=secondary_collection_name, vector="sparse-image", ), - ) + ).points @classmethod def filter_recommend_text( cls, client: QdrantBase, query_filter: models.Filter ) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10], + query=models.RecommendQuery(recommend=models.RecommendInput(positive=[10])), query_filter=query_filter, with_payload=True, limit=10, using="sparse-text", - ) + ).points @classmethod def best_score_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[ - 10, - 20, - ], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 20], negative=[], strategy=models.RecommendStrategy.BEST_SCORE + ) + ), with_payload=True, limit=10, using="sparse-image", - strategy=models.RecommendStrategy.BEST_SCORE, - ) + ).points @classmethod def best_score_recommend_euclid(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[ - 10, - 20, - ], - negative=[11, 21], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 20], + negative=[11, 21], + strategy=models.RecommendStrategy.BEST_SCORE, + ) + ), with_payload=True, limit=10, using="sparse-code", - strategy=models.RecommendStrategy.BEST_SCORE, - ) + ).points @classmethod def only_negatives_best_score_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=None, - negative=[10, 12], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=None, negative=[10, 12], strategy=models.RecommendStrategy.BEST_SCORE + ) + ), with_payload=True, limit=10, 
using="sparse-image", - strategy=models.RecommendStrategy.BEST_SCORE, - ) + ).points @classmethod def only_negatives_best_score_recommend_euclid( cls, client: QdrantBase ) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=None, - negative=[10, 12], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=None, negative=[10, 12], strategy=models.RecommendStrategy.BEST_SCORE + ) + ), with_payload=True, limit=10, using="sparse-code", - strategy=models.RecommendStrategy.BEST_SCORE, - ) + ).points @classmethod def sum_scores_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[ - 10, - 20, - ], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 20], negative=[], strategy=models.RecommendStrategy.SUM_SCORES + ) + ), with_payload=True, limit=10, using="sparse-image", - strategy=models.RecommendStrategy.SUM_SCORES, - ) + ).points @classmethod def sum_scores_recommend_euclid(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[ - 10, - 20, - ], - negative=[11, 21], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 20], + negative=[11, 21], + strategy=models.RecommendStrategy.SUM_SCORES, + ) + ), with_payload=True, limit=10, using="sparse-code", - strategy=models.RecommendStrategy.SUM_SCORES, - ) + ).points @classmethod def only_negatives_sum_scores_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=None, - negative=[10, 12], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=None, negative=[10, 12], strategy=models.RecommendStrategy.SUM_SCORES + ) + ), with_payload=True, limit=10, using="sparse-image", - strategy=models.RecommendStrategy.SUM_SCORES, - ) + ).points @classmethod def only_negatives_sum_scores_recommend_euclid( cls, client: QdrantBase ) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=None, - negative=[10, 12], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=None, negative=[10, 12], strategy=models.RecommendStrategy.SUM_SCORES + ) + ), with_payload=True, limit=10, using="sparse-code", - strategy=models.RecommendStrategy.SUM_SCORES, - ) + ).points @classmethod def avg_vector_recommend(cls, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[10, 13], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10, 13], + negative=[], + strategy=models.RecommendStrategy.AVERAGE_VECTOR, + ) + ), with_payload=True, limit=10, using="sparse-image", - strategy=models.RecommendStrategy.AVERAGE_VECTOR, - ) + ).points def recommend_from_raw_vectors(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[self.query_image], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[self.query_image], negative=[]) + ), with_payload=True, limit=10, using="sparse-image", - ) + ).points def 
recommend_from_raw_vectors_and_ids(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.recommend( + return client.query_points( collection_name=COLLECTION_NAME, - positive=[self.query_image, 10], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[self.query_image, 10], negative=[]) + ), with_payload=True, limit=10, using="sparse-image", - ) + ).points @staticmethod - def recommend_batch(client: QdrantBase) -> list[list[models.ScoredPoint]]: - return client.recommend_batch( + def recommend_batch(client: QdrantBase) -> list[models.QueryResponse]: + return client.query_batch_points( collection_name=COLLECTION_NAME, requests=[ - models.RecommendRequest( - positive=[3], - negative=[], + models.QueryRequest( + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[3], + negative=[], + strategy=models.RecommendStrategy.AVERAGE_VECTOR, + ) + ), limit=1, using="sparse-image", - strategy=models.RecommendStrategy.AVERAGE_VECTOR, ), - models.RecommendRequest( - positive=[10], - negative=[], + models.QueryRequest( + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=[10], + negative=[], + strategy=models.RecommendStrategy.BEST_SCORE, + ) + ), limit=2, using="sparse-image", - strategy=models.RecommendStrategy.BEST_SCORE, lookup_from=models.LookupLocation( collection=secondary_collection_name, vector="sparse-image", @@ -362,33 +387,37 @@ def test_query_with_nan(): ) with pytest.raises(AssertionError): - local_client.recommend( + local_client.query_points( collection_name=COLLECTION_NAME, - positive=[sparse_vector], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[sparse_vector], negative=[]) + ), using=using, ) with pytest.raises(UnexpectedResponse): - remote_client.recommend( + remote_client.query_points( collection_name=COLLECTION_NAME, - positive=[sparse_vector], - negative=[], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[sparse_vector], negative=[]) + ), using=using, ) with pytest.raises(AssertionError): - local_client.recommend( + local_client.query_points( collection_name=COLLECTION_NAME, - positive=[1], - negative=[sparse_vector], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[1], negative=[sparse_vector]) + ), using=using, ) with pytest.raises(UnexpectedResponse): - remote_client.recommend( + remote_client.query_points( collection_name=COLLECTION_NAME, - positive=[1], - negative=[sparse_vector], + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[1], negative=[sparse_vector]) + ), using=using, ) diff --git a/tests/congruence_tests/test_sparse_search.py b/tests/congruence_tests/test_sparse_search.py index 260835ee..4702df9b 100644 --- a/tests/congruence_tests/test_sparse_search.py +++ b/tests/congruence_tests/test_sparse_search.py @@ -2,10 +2,8 @@ import pytest from qdrant_client.client_base import QdrantBase -from qdrant_client.conversions.common_types import NamedSparseVector from qdrant_client.http.exceptions import UnexpectedResponse from qdrant_client.http.models import models -from qdrant_client.local.sparse import sort_sparse_vector from tests.congruence_tests.test_common import ( COLLECTION_NAME, compare_client_results, @@ -31,115 +29,114 @@ def __init__(self): self.query_code = generate_random_sparse_vector(sparse_code_vector_size, density=0.1) def simple_search_text(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return 
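
For sparse vectors the migration removes the `NamedSparseVector` wrapper entirely: a bare `models.SparseVector` goes into `query`, and `using` selects the sparse vector by name. A sketch with an invented sparse collection:

from qdrant_client import QdrantClient, models

client = QdrantClient(":memory:")
client.create_collection(
    "demo",
    vectors_config={},
    sparse_vectors_config={"sparse-text": models.SparseVectorParams()},
)
client.upsert(
    "demo",
    points=[
        models.PointStruct(
            id=1,
            vector={"sparse-text": models.SparseVector(indices=[1, 7], values=[0.5, 0.9])},
        )
    ],
)

# Old: client.search(..., query_vector=NamedSparseVector(name="sparse-text", vector=sv))
points = client.query_points(
    "demo",
    query=models.SparseVector(indices=[1, 7], values=[1.0, 0.2]),  # bare sparse vector
    using="sparse-text",
    limit=10,
).points
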
client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-text", vector=self.query_text), + using="sparse-text", + query=self.query_text, with_payload=True, with_vectors=["sparse-text"], limit=10, - ) + ).points def simple_search_image(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-image", vector=self.query_image), + query=self.query_image, + using="sparse-image", with_payload=True, with_vectors=["sparse-image"], limit=10, - ) + ).points def simple_search_code(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-code", vector=self.query_code), + using="sparse-code", + query=self.query_code, with_payload=True, with_vectors=True, limit=10, - ) + ).points def simple_search_text_offset(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-text", vector=self.query_text), + query=self.query_text, + using="sparse-text", with_payload=True, limit=10, offset=10, - ) + ).points def search_score_threshold(self, client: QdrantBase) -> list[models.ScoredPoint]: - res1 = client.search( + res1 = client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-text", vector=self.query_text), + using="sparse-text", + query=self.query_text, with_payload=True, limit=10, score_threshold=0.9, - ) + ).points - res2 = client.search( + res2 = client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-text", vector=self.query_text), + using="sparse-text", + query=self.query_text, with_payload=True, limit=10, score_threshold=0.95, - ) + ).points - res3 = client.search( + res3 = client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-text", vector=self.query_text), + using="sparse-text", + query=self.query_text, with_payload=True, limit=10, score_threshold=0.1, - ) + ).points return res1 + res2 + res3 def simple_search_text_select_payload(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-text", vector=self.query_text), + using="sparse-text", + query=self.query_text, with_payload=["text_array", "nested.id"], limit=10, - ) + ).points def search_payload_exclude(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-text", vector=self.query_text), + using="sparse-text", + query=self.query_text, with_payload=models.PayloadSelectorExclude(exclude=["text_array", "nested.id"]), limit=10, - ) + ).points def simple_search_image_select_vector(self, client: QdrantBase) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-image", vector=self.query_image), + using="sparse-image", + query=self.query_image, with_payload=False, with_vectors=["sparse-image", "sparse-code"], limit=10, - ) + ).points def filter_search_text( self, client: QdrantBase, query_filter: models.Filter ) -> 
list[models.ScoredPoint]: - return client.search( - collection_name=COLLECTION_NAME, - query_vector=NamedSparseVector(name="sparse-text", vector=self.query_text), - query_filter=query_filter, - with_payload=True, - limit=10, - ) - - def filter_search_text_single( - self, client: QdrantBase, query_filter: models.Filter - ) -> list[models.ScoredPoint]: - return client.search( + return client.query_points( collection_name=COLLECTION_NAME, - query_vector=self.query_text, # why it is not a NamedSparseVector? + using="sparse-text", + query=self.query_text, query_filter=query_filter, with_payload=True, - with_vectors=True, limit=10, - ) + ).points def default_mmr_query(self, client: QdrantBase) -> models.QueryResponse: return client.query_points( @@ -343,10 +340,8 @@ def test_query_with_nan(): fixture_points = generate_sparse_fixtures() sparse_vector = random_sparse_vectors({"sparse-text": sparse_text_vector_size}) - named_sparse_vector = models.NamedSparseVector( - name="sparse-text", vector=sparse_vector["sparse-text"] - ) - named_sparse_vector.vector.values[0] = np.nan + + sparse_vector["sparse-text"].values[0] = np.nan local_client.create_collection( COLLECTION_NAME, vectors_config={}, sparse_vectors_config=sparse_vectors_config @@ -370,6 +365,10 @@ def test_query_with_nan(): ) with pytest.raises(AssertionError): - local_client.search(COLLECTION_NAME, named_sparse_vector) + local_client.query_points( + COLLECTION_NAME, sparse_vector["sparse-text"], using="sparse-text" + ) with pytest.raises(UnexpectedResponse): - remote_client.search(COLLECTION_NAME, named_sparse_vector) + remote_client.query_points( + COLLECTION_NAME, sparse_vector["sparse-text"], using="sparse-text" + ) diff --git a/tests/congruence_tests/test_updates.py b/tests/congruence_tests/test_updates.py index ee05d4e7..edda787f 100644 --- a/tests/congruence_tests/test_updates.py +++ b/tests/congruence_tests/test_updates.py @@ -1,6 +1,7 @@ import itertools import uuid from collections import defaultdict +import random import numpy as np import pytest @@ -315,15 +316,15 @@ def test_upload_wrong_vectors(): # does not raise without wait=True with pytest.raises(qdrant_client.http.exceptions.UnexpectedResponse): - remote_client.upload_records( + remote_client.upload_points( wrong_vectors_collection, - records=[models.Record(id=3, vector=dense_vector)], + points=[models.PointStruct(id=3, vector=dense_vector)], wait=True, ) with pytest.raises(ValueError): - local_client.upload_records( - wrong_vectors_collection, records=[models.Record(id=3, vector=dense_vector)] + local_client.upload_points( + wrong_vectors_collection, points=[models.PointStruct(id=3, vector=dense_vector)] ) unnamed_vector = [0.1, 0.3] @@ -502,3 +503,207 @@ def test_update_vectors(): local_client.delete_collection(collection_name=COLLECTION_NAME) remote_client.delete_collection(collection_name=COLLECTION_NAME) # endregion + + +@pytest.mark.parametrize("prefer_grpc", [False, True]) +def test_update_filter(prefer_grpc): + local_client = init_local() + remote_client = init_remote(prefer_grpc=prefer_grpc) + + vectors_config = models.VectorParams(size=2, distance=models.Distance.DOT) + local_client.create_collection(collection_name=COLLECTION_NAME, vectors_config=vectors_config) + if remote_client.collection_exists(collection_name=COLLECTION_NAME): + remote_client.delete_collection(collection_name=COLLECTION_NAME) + remote_client.create_collection(collection_name=COLLECTION_NAME, vectors_config=vectors_config) + + original_vector = [random.random(), random.random()] + 
original_points = [ + models.PointStruct(id=1, vector=original_vector[:], payload={"digit": 1}), + models.PointStruct(id=2, vector=original_vector[:], payload={"digit": 2}), + ] + + local_client.upsert(COLLECTION_NAME, points=original_points) + remote_client.upsert(COLLECTION_NAME, points=original_points) + # collection points: + # id=1, vector=original_vector, payload={digit: 1} + # id=2, vector=original_vector, payload={digit: 2} + + new_points = [ + models.PointStruct(id=1, vector=original_vector[:], payload={"digit": 3}), + models.PointStruct(id=2, vector=original_vector[:], payload={"digit": 4}), + models.PointStruct(id=3, vector=original_vector[:], payload={"digit": 5}), + ] + + update_filter = models.Filter( + must=models.FieldCondition(key="digit", match=models.MatchValue(value=1)) + ) + local_client.upsert(COLLECTION_NAME, points=new_points, update_filter=update_filter) + remote_client.upsert(COLLECTION_NAME, points=new_points, update_filter=update_filter) + # collection points: + # id=1, vector=original_vector, payload={digit: 3} + # id=2, vector=original_vector, payload={digit: 2} + # id=3, vector=original_vector, payload={digit: 5} + compare_collections(local_client, remote_client, 10, collection_name=COLLECTION_NAME) + + retrieved_points = local_client.retrieve(collection_name=COLLECTION_NAME, ids=[1, 2, 3]) + assert retrieved_points[0].payload["digit"] == 3 + assert retrieved_points[1].payload["digit"] == 2 + assert len(retrieved_points) == 3 + + update_filter = models.Filter( + must=models.FieldCondition(key="digit", match=models.MatchValue(value=3)) + ) + new_vector = (-np.array(original_vector[:])).tolist() + new_point_vectors = [ + models.PointVectors(id=1, vector=new_vector[:]), + models.PointVectors(id=2, vector=new_vector[:]), + ] + local_client.update_vectors( + COLLECTION_NAME, points=new_point_vectors, update_filter=update_filter + ) + remote_client.update_vectors( + COLLECTION_NAME, points=new_point_vectors, update_filter=update_filter + ) + # collection points: + # id=1, vector=-original_vector, payload={digit: 3} + # id=2, vector=original_vector, payload={digit: 2} + # id=3, vector=original_vector, payload={digit: 5} + compare_collections(local_client, remote_client, 10, collection_name=COLLECTION_NAME) + + retrieved_points = local_client.retrieve( + collection_name=COLLECTION_NAME, ids=[1, 2], with_vectors=True + ) + assert np.allclose(retrieved_points[0].vector, new_vector) + assert np.allclose(retrieved_points[1].vector, original_vector) + + new_points_2 = [ + models.PointStruct(id=1, vector=original_vector[:], payload={"digit": 1}), + models.PointStruct(id=2, vector=new_vector, payload={"digit": 99}), + ] + + update_filter = models.Filter( + must=models.FieldCondition(key="digit", match=models.MatchValue(value=3)) + ) + + local_client.upload_points(COLLECTION_NAME, points=new_points_2, update_filter=update_filter) + remote_client.upload_points(COLLECTION_NAME, points=new_points_2, update_filter=update_filter) + # collection points: + # id=1, vector=original_vector, payload={digit: 1} + # id=2, vector=original_vector, payload={digit: 2} + # id=3, vector=original_vector, payload={digit: 5} + compare_collections(local_client, remote_client, 10, collection_name=COLLECTION_NAME) + + retrieved_points = local_client.retrieve(collection_name=COLLECTION_NAME, ids=[1, 2]) + assert retrieved_points[0].payload["digit"] == 1 + assert retrieved_points[1].payload["digit"] == 2 + + new_points_3 = [ + models.PointStruct(id=1, vector=original_vector[:], payload={"digit": 3}), 
+ models.PointStruct(id=2, vector=original_vector[:], payload={"digit": 99}), + ] + update_filter = models.Filter( + must=models.FieldCondition(key="digit", match=models.MatchValue(value=1)) + ) + + local_client.upload_points( + COLLECTION_NAME, points=new_points_3, update_filter=update_filter, batch_size=1, parallel=2 + ) + remote_client.upload_points( + COLLECTION_NAME, points=new_points_3, update_filter=update_filter, batch_size=1, parallel=2 + ) + # collection points: + # id=1, vector=original_vector, payload={digit: 3} + # id=2, vector=original_vector, payload={digit: 2} + # id=3, vector=original_vector, payload={digit: 5} + compare_collections(local_client, remote_client, 10, collection_name=COLLECTION_NAME) + + retrieved_points = local_client.retrieve(collection_name=COLLECTION_NAME, ids=[1, 2]) + assert retrieved_points[0].payload["digit"] == 3 + assert retrieved_points[1].payload["digit"] == 2 + + vectors = [original_vector[:], original_vector[:]] + ids = [1, 2] + payload = [ + {"digit": 1}, + {"digit": 99}, + ] + update_filter = models.Filter( + must=models.FieldCondition(key="digit", match=models.MatchValue(value=3)) + ) + + # not testing MP upload_collection, since upload_points uses _upload_collection under the hood + local_client.upload_collection( + COLLECTION_NAME, vectors=vectors, ids=ids, payload=payload, update_filter=update_filter + ) + remote_client.upload_collection( + COLLECTION_NAME, vectors=vectors, ids=ids, payload=payload, update_filter=update_filter + ) + # collection points: + # id=1, vector=original_vector, payload={digit: 1} + # id=2, vector=original_vector, payload={digit: 2} + # id=3, vector=original_vector, payload={digit: 5} + compare_collections(local_client, remote_client, 10, collection_name=COLLECTION_NAME) + + retrieved_points = local_client.retrieve(collection_name=COLLECTION_NAME, ids=[1, 2]) + assert retrieved_points[0].payload["digit"] == 1 + assert retrieved_points[1].payload["digit"] == 2 + + ids = [1, 2, 4] + vectors = [original_vector[:], original_vector[:], original_vector[:]] + payload = [{"digit": 3}, {"digit": 0}, {"digit": 4}] + points_batch = models.PointsBatch( + batch=models.Batch(ids=ids, vectors=vectors, payloads=payload), + update_filter=models.Filter(must=models.HasIdCondition(has_id=[1])), + ) + + point_vectors = [ + models.PointVectors( + id=3, + vector=new_vector[:], + ), + models.PointVectors(id=1, vector=new_vector[:]), + ] + + upsert_batch = models.UpsertOperation(upsert=points_batch) + update_vectors = models.UpdateVectorsOperation( + update_vectors=models.UpdateVectors( + points=point_vectors, + update_filter=models.Filter(must=models.HasIdCondition(has_id=[3])), + ) + ) + + local_client.batch_update_points( + COLLECTION_NAME, update_operations=[upsert_batch, update_vectors] + ) + remote_client.batch_update_points( + COLLECTION_NAME, update_operations=[upsert_batch, update_vectors] + ) + + compare_collections(local_client, remote_client, 10, collection_name=COLLECTION_NAME) + retrieved_points = local_client.retrieve( + collection_name=COLLECTION_NAME, ids=[1, 2, 3, 4], with_vectors=True + ) + assert retrieved_points[0].payload["digit"] == 3 # payload updated + assert retrieved_points[1].payload["digit"] == 2 # payload stays unchanged + assert np.allclose(retrieved_points[0].vector, original_vector) # vector stays unchanged + assert np.allclose(retrieved_points[2].vector, new_vector) # vector updated + assert len(retrieved_points) == 4 # not existing point inserted + + points_list = models.PointsList( + points=[ + 
models.PointStruct(id=1, vector=original_vector[:], payload={"digit": 1}), + models.PointStruct(id=2, vector=original_vector[:], payload={"digit": 99}), + models.PointStruct(id=5, vector=original_vector[:], payload={"digit": 5}), + ], + update_filter=models.Filter(must=models.HasIdCondition(has_id=[2])), + ) + upsert_points_list = models.UpsertOperation(upsert=points_list) + + local_client.batch_update_points(COLLECTION_NAME, update_operations=[upsert_points_list]) + remote_client.batch_update_points(COLLECTION_NAME, update_operations=[upsert_points_list]) + compare_collections(local_client, remote_client, 10, collection_name=COLLECTION_NAME) + + retrieved_points = local_client.retrieve(collection_name=COLLECTION_NAME, ids=[1, 2, 5]) + assert retrieved_points[0].payload["digit"] == 3 + assert retrieved_points[1].payload["digit"] == 99 + assert len(retrieved_points) == 3 diff --git a/tests/congruence_tests/test_uuids.py b/tests/congruence_tests/test_uuids.py new file mode 100644 index 00000000..31ff9d37 --- /dev/null +++ b/tests/congruence_tests/test_uuids.py @@ -0,0 +1,281 @@ +import uuid + +import numpy as np +import pytest + +from qdrant_client import models, QdrantClient + +from tests.congruence_tests.test_common import ( + init_local, + init_remote, + generate_fixtures, + compare_client_results, + compare_collections, +) +from tests.fixtures.payload import one_random_payload_please + +COLLECTION_NAME = "test_uuid_input_collection" + + +@pytest.mark.parametrize("prefer_grpc", (True, False)) +def test_uuid_input(prefer_grpc): + remote_client = init_remote(prefer_grpc=prefer_grpc) + local_client = init_local() + + text_dim = 100 + code_dim = 10 + fixture_points = generate_fixtures( + random_ids=True, vectors_sizes={"text": text_dim, "code": code_dim} + ) + vectors_config = { + "text": models.VectorParams(size=text_dim, distance=models.Distance.COSINE), + "code": models.VectorParams(size=code_dim, distance=models.Distance.COSINE), + } + + for point in fixture_points: + point.id = uuid.UUID(point.id) + predefined_id = uuid.uuid4() + + known_point = models.PointStruct( + id=predefined_id, + vector={ + "text": np.random.random(text_dim).tolist(), + }, + payload=one_random_payload_please(101), + ) + fixture_points.append(known_point) + for cl in (remote_client, local_client): + if cl.collection_exists(COLLECTION_NAME): + cl.delete_collection(COLLECTION_NAME) + + cl.create_collection( + COLLECTION_NAME, + vectors_config=vectors_config, + ) + cl.create_payload_index(COLLECTION_NAME, "field", models.PayloadSchemaType.KEYWORD) + cl.upsert(COLLECTION_NAME, fixture_points) + + def query_points_uuid(client: QdrantClient): + return client.query_points(COLLECTION_NAME, query=predefined_id, using="text", limit=1) + + compare_client_results(local_client, remote_client, query_points_uuid) + + random_query = np.random.random(text_dim).tolist() + id_filter = models.Filter(must=models.HasIdCondition(has_id=[predefined_id])) + + def query_points_filter_uuid(client: QdrantClient): + return client.query_points( + COLLECTION_NAME, + query=random_query, + using="text", + query_filter=id_filter, + ) + + compare_client_results(local_client, remote_client, query_points_filter_uuid) + + def query_batch_points_uuid(client: QdrantClient): + query_batch = [models.QueryRequest(query=predefined_id, using="text")] + return client.query_batch_points(COLLECTION_NAME, query_batch) + + compare_client_results(local_client, remote_client, query_batch_points_uuid) + + def query_points_groups_uuid(client: QdrantClient): + return 
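
The new `test_update_filter` pins down the conditional-upsert semantics, as the state comments in the test suggest: with `update_filter` set, an existing point is overwritten only if its currently stored payload matches the filter, while ids that do not exist yet are inserted regardless. A sketch of that behavior under those assumptions; names and payload values are illustrative:

from qdrant_client import QdrantClient, models

client = QdrantClient(":memory:")
client.create_collection(
    "demo", vectors_config=models.VectorParams(size=2, distance=models.Distance.DOT)
)
client.upsert(
    "demo",
    points=[
        models.PointStruct(id=1, vector=[0.1, 0.2], payload={"version": 1}),
        models.PointStruct(id=2, vector=[0.3, 0.4], payload={"version": 2}),
    ],
)

client.upsert(
    "demo",
    points=[
        models.PointStruct(id=1, vector=[0.1, 0.2], payload={"version": 10}),  # stored payload matches -> updated
        models.PointStruct(id=2, vector=[0.3, 0.4], payload={"version": 20}),  # no match -> left untouched
        models.PointStruct(id=3, vector=[0.5, 0.6], payload={"version": 30}),  # new id -> inserted
    ],
    update_filter=models.Filter(
        must=models.FieldCondition(key="version", match=models.MatchValue(value=1))
    ),
)
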
client.query_points_groups(
+            COLLECTION_NAME, group_by="field", limit=1, using="text", query=predefined_id
+        )
+
+    compare_client_results(local_client, remote_client, query_points_groups_uuid)
+
+    def query_points_groups_uuid_filter(client: QdrantClient):
+        return client.query_points_groups(
+            COLLECTION_NAME,
+            group_by="field",
+            limit=1,
+            using="text",
+            query=np.random.random(text_dim).tolist(),
+            query_filter=models.Filter(must=models.HasIdCondition(has_id=[predefined_id])),
+        )
+
+    compare_client_results(local_client, remote_client, query_points_groups_uuid_filter)
+
+    def search_matrix_pairs_uuid_filter(client: QdrantClient):
+        return client.search_matrix_pairs(COLLECTION_NAME, query_filter=id_filter, using="text")
+
+    compare_client_results(local_client, remote_client, search_matrix_pairs_uuid_filter)
+
+    def search_matrix_offsets_uuid_filter(client: QdrantClient):
+        return client.search_matrix_offsets(COLLECTION_NAME, query_filter=id_filter, using="text")
+
+    compare_client_results(local_client, remote_client, search_matrix_offsets_uuid_filter)
+
+    cl.scroll(COLLECTION_NAME, scroll_filter=id_filter)
+
+    def scroll_uuid_filter(client: QdrantClient):
+        return client.scroll(COLLECTION_NAME, scroll_filter=id_filter)
+
+    compare_client_results(local_client, remote_client, scroll_uuid_filter)
+
+    def facet_uuid_filter(client: QdrantClient):
+        return client.facet(COLLECTION_NAME, key="field", facet_filter=id_filter)
+
+    compare_client_results(local_client, remote_client, facet_uuid_filter)
+
+    def retrieve_uuid_filter(client: QdrantClient):
+        return client.retrieve(COLLECTION_NAME, ids=[predefined_id])
+
+    compare_client_results(local_client, remote_client, retrieve_uuid_filter)
+
+    random_vector = np.random.random(text_dim).tolist()
+    random_named_vector = {"text": random_vector}
+
+    for cl in (local_client, remote_client):
+        cl.update_vectors(
+            COLLECTION_NAME,
+            points=[models.PointVectors(id=predefined_id, vector=random_named_vector)],
+            update_filter=id_filter,
+        )
+
+        cl.delete_vectors(COLLECTION_NAME, vectors=["code"], points=id_filter)
+        cl.delete_vectors(COLLECTION_NAME, vectors=["code"], points=[predefined_id])
+        cl.delete_vectors(
+            COLLECTION_NAME, vectors=["code"], points=models.PointIdsList(points=[predefined_id])
+        )
+        cl.delete_vectors(
+            COLLECTION_NAME, vectors=["code"], points=models.FilterSelector(filter=id_filter)
+        )
+
+        cl.delete(COLLECTION_NAME, points_selector=id_filter)
+        cl.delete(COLLECTION_NAME, points_selector=[predefined_id])
+        cl.delete(COLLECTION_NAME, points_selector=models.PointIdsList(points=[predefined_id]))
+        cl.delete(COLLECTION_NAME, points_selector=models.FilterSelector(filter=id_filter))
+
+        cl.upsert(
+            COLLECTION_NAME,
+            points=[models.PointStruct(id=predefined_id, vector=random_named_vector)],
+        )
+        cl.set_payload(COLLECTION_NAME, payload={"qwe": "rty"}, points=id_filter)
+        cl.set_payload(COLLECTION_NAME, payload={"qwe": "rty"}, points=[predefined_id])
+        cl.set_payload(
+            COLLECTION_NAME,
+            payload={"qwe": "rty"},
+            points=models.PointIdsList(points=[predefined_id]),
+        )
+        cl.set_payload(
+            COLLECTION_NAME, payload={"qwe": "rty"}, points=models.FilterSelector(filter=id_filter)
+        )
+
+        cl.overwrite_payload(COLLECTION_NAME, payload={"qwe": "rty"}, points=id_filter)
+        cl.overwrite_payload(COLLECTION_NAME, payload={"qwe": "rty"}, points=[predefined_id])
+        cl.overwrite_payload(
+            COLLECTION_NAME,
+            payload={"qwe": "rty"},
+            points=models.PointIdsList(points=[predefined_id]),
+        )
+        cl.overwrite_payload(
+            COLLECTION_NAME, payload={"qwe": "rty"}, points=models.FilterSelector(filter=id_filter)
+        )
+
+        cl.delete_payload(COLLECTION_NAME, keys=["qwe"], points=id_filter)
+        cl.delete_payload(COLLECTION_NAME, keys=["qwe"], points=[predefined_id])
+        cl.delete_payload(
+            COLLECTION_NAME, keys=["qwe"], points=models.PointIdsList(points=[predefined_id])
+        )
+        cl.delete_payload(
+            COLLECTION_NAME, keys=["qwe"], points=models.FilterSelector(filter=id_filter)
+        )
+
+        cl.clear_payload(COLLECTION_NAME, points_selector=id_filter)
+        cl.clear_payload(COLLECTION_NAME, points_selector=[predefined_id])
+        cl.clear_payload(
+            COLLECTION_NAME, points_selector=models.PointIdsList(points=[predefined_id])
+        )
+        cl.clear_payload(COLLECTION_NAME, points_selector=models.FilterSelector(filter=id_filter))
+
+        cl.upload_collection(
+            COLLECTION_NAME,
+            ids=[predefined_id],
+            vectors={"text": np.array([random_vector])},
+        )
+
+        cl.batch_update_points(
+            COLLECTION_NAME,
+            update_operations=[
+                models.UpsertOperation(
+                    upsert=models.PointsBatch(
+                        batch=models.Batch(
+                            ids=[predefined_id],
+                            vectors={"text": [random_vector]},
+                        )
+                    )
+                ),
+                models.UpsertOperation(
+                    upsert=models.PointsList(
+                        points=[
+                            models.PointStruct(
+                                id=predefined_id,
+                                vector=random_named_vector,
+                            )
+                        ]
+                    )
+                ),
+                models.SetPayloadOperation(
+                    set_payload=models.SetPayload(payload={"qwe": "rty"}, filter=id_filter)
+                ),
+                models.SetPayloadOperation(
+                    set_payload=models.SetPayload(payload={"qwe": "rty"}, points=[predefined_id])
+                ),
+                models.OverwritePayloadOperation(
+                    overwrite_payload=models.SetPayload(payload={"qwe": "rty"}, filter=id_filter)
+                ),
+                models.OverwritePayloadOperation(
+                    overwrite_payload=models.SetPayload(
+                        payload={"qwe": "rty"}, points=[predefined_id]
+                    )
+                ),
+                models.DeletePayloadOperation(
+                    delete_payload=models.DeletePayload(keys=["qwe"], filter=id_filter)
+                ),
+                models.DeletePayloadOperation(
+                    delete_payload=models.DeletePayload(keys=["qwe"], points=[predefined_id])
+                ),
+                models.ClearPayloadOperation(
+                    clear_payload=models.PointIdsList(points=[predefined_id])
+                ),
+                models.ClearPayloadOperation(
+                    clear_payload=models.FilterSelector(filter=id_filter)
+                ),
+                models.UpdateVectorsOperation(
+                    update_vectors=models.UpdateVectors(
+                        points=[
+                            models.PointVectors(
+                                id=predefined_id,
+                                vector=random_named_vector,
+                            )
+                        ]
+                    ),
+                ),
+                models.UpdateVectorsOperation(
+                    update_vectors=models.UpdateVectors(
+                        points=[
+                            models.PointVectors(
+                                id=predefined_id,
+                                vector=random_named_vector,
+                            )
+                        ],
+                        update_filter=id_filter,
+                    ),
+                ),
+                models.DeleteVectorsOperation(
+                    delete_vectors=models.DeleteVectors(filter=id_filter, vector=["code"])
+                ),
+                models.DeleteVectorsOperation(
+                    delete_vectors=models.DeleteVectors(points=[predefined_id], vector=["code"])
+                ),
+                models.DeleteOperation(delete=models.PointIdsList(points=[predefined_id])),
+                models.DeleteOperation(delete=models.FilterSelector(filter=id_filter)),
+            ],
+        )
+
+    compare_collections(
+        local_client, remote_client, num_vectors=1000, collection_name=COLLECTION_NAME
+    )
diff --git a/tests/conversions/fixtures.py b/tests/conversions/fixtures.py
index 1ccbc1a4..edbe1b9d 100644
--- a/tests/conversions/fixtures.py
+++ b/tests/conversions/fixtures.py
@@ -35,6 +35,7 @@
 )
 match_except_integers = grpc.Match(except_integers=grpc.RepeatedIntegers(integers=[1, 2, 3]))
 match_phrase = grpc.Match(phrase="hello")
+match_text_any = grpc.Match(text_any="hello what's up")

 field_condition_match = grpc.FieldCondition(key="match_field", match=match_keyword)
@@ -48,6 +49,7 @@
     key="match_field", match=match_except_integers
 )
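A minimal sketch of how the new text_any match surfaces on the REST side (a hypothetical filter; the "words" payload key mirrors the fixture added in tests/fixtures/filters.py further down):

    from qdrant_client import models

    # Matches points whose full-text indexed field contains at least one of the given words.
    text_any_filter = models.Filter(
        must=[
            models.FieldCondition(
                key="words",
                match=models.MatchTextAny(text_any="hello what's up"),
            )
        ]
    )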
 field_condition_match_phrase = grpc.FieldCondition(key="match_field", match=match_phrase)
+field_condition_match_text_any = grpc.FieldCondition(key="match_field", match=match_text_any)

 range_ = grpc.Range(
     lt=1.0,
@@ -124,6 +126,7 @@
 condition_except_integers = grpc.Condition(field=field_condition_match_except_integers)
 condition_phrase = grpc.Condition(field=field_condition_match_phrase)
+condition_text_any = grpc.Condition(field=field_condition_match_text_any)

 nested = grpc.NestedCondition(
     key="a.b.c", filter=grpc.Filter(must=[grpc.Condition(field=field_condition_range)])
@@ -144,6 +147,7 @@
         condition_except_keywords,
         condition_except_integers,
         condition_phrase,
+        condition_text_any,
     ],
     should=[
         condition_field_match,
@@ -290,6 +294,7 @@
     full_scan_threshold=10000,
     max_indexing_threads=0,
     on_disk=False,
+    inline_storage=True,
 )

 hnsw_config_2 = grpc.HnswConfigDiff(
@@ -355,6 +360,7 @@
             )
         }
     ),
+    max_payload_index_count=32,
 )

 strict_mode_config_empty = grpc.StrictModeConfig(
@@ -362,6 +368,8 @@
     max_query_limit=100,
 )

+metadata = {"collection-setting": grpc.Value(integer_value=3)}
+
 collection_config = grpc.CollectionConfig(
     params=collection_params,
     hnsw_config=hnsw_config,
@@ -370,6 +378,15 @@
     strict_mode_config=strict_mode_config,
 )

+collection_config_w_metadata = grpc.CollectionConfig(
+    params=collection_params,
+    hnsw_config=hnsw_config,
+    optimizer_config=optimizer_config,
+    wal_config=wal_config,
+    strict_mode_config=strict_mode_config,
+    metadata=metadata,
+)
+
 payload_value = {
     "int": 1,
     "float": 0.23,
@@ -384,10 +401,6 @@

 payload = payload_to_grpc({"payload": payload_value})

-single_vector = grpc.Vectors(vector=grpc.Vector(data=[1.0, 2.0, 3.0, 4.0]))
-multi_vector = grpc.Vectors(
-    vector=grpc.Vector(data=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], vectors_count=2)
-)
 single_dense_vector = grpc.Vectors(
     vector=grpc.Vector(dense=grpc.DenseVector(data=[1.0, 2.0, 3.0]))
 )
@@ -402,7 +415,12 @@
 )
 single_multidense_vector = grpc.Vectors(
     vector=grpc.Vector(
-        multi_dense=grpc.MultiDenseVector(vectors=[grpc.DenseVector(data=[1.0, 2.0, 3.0])])
+        multi_dense=grpc.MultiDenseVector(
+            vectors=[
+                grpc.DenseVector(data=[1.0, 2.0, 3.0, 4.0]),
+                grpc.DenseVector(data=[13.0, 14.0, 15.0, 16.0]),
+            ]
+        )
     )
 )

 document_with_options = grpc.Document(
@@ -423,17 +441,26 @@
 inference_object_without_options = grpc.InferenceObject(object=json_to_value("text"), model="bert")
 order_value_int = grpc.OrderValue(int=42)
 order_value_float = grpc.OrderValue(float=42.0)
-single_vector_output = grpc.VectorsOutput(vector=grpc.VectorOutput(data=[1.0, 2.0, 3.0, 4.0]))
+single_vector_output = grpc.VectorsOutput(
+    vector=grpc.VectorOutput(dense=grpc.DenseVector(data=[100.0, 200.0, 300.0, 400.0]))
+)
 multi_vector_output = grpc.VectorsOutput(
-    vector=grpc.VectorOutput(data=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], vectors_count=2)
+    vector=grpc.VectorOutput(
+        multi_dense=grpc.MultiDenseVector(
+            vectors=[
+                grpc.DenseVector(data=[1.0, 4.0, 77.0]),
+                grpc.DenseVector(data=[12.0, 25.0, 44.0]),
+            ]
+        )
+    )
 )
 named_vectors_output = grpc.VectorsOutput(
     vectors=grpc.NamedVectorsOutput(
         vectors={
             "sparse": grpc.VectorOutput(
-                data=[1.0, 2.0, 3.0, 4.0], indices=grpc.SparseIndices(data=[1, 2, 3, 4])
+                sparse=grpc.SparseVector(values=[10.0, 20.0, 30.0], indices=[11, 22, 33])
             ),
-            "dense": grpc.VectorOutput(data=[1.0, 2.0, 3.0, 4.0]),
+            "dense": grpc.VectorOutput(dense=grpc.DenseVector(data=[7.0, 8.0])),
             "multi": multi_vector_output.vector,
         }
     )
@@ -452,11 +479,11 @@
 )
 scored_point_order_value_float = grpc.ScoredPoint(
     id=point_id,
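     # named_vectors_output (defined above) exercises the typed VectorOutput variants
     # (dense=DenseVector, sparse=SparseVector, multi_dense=MultiDenseVector) that this
     # diff uses in place of the legacy flat data=[...] / vectors_count representation.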
     payload=payload,
     score=0.99,
     vectors=named_vectors_output,
     version=12,
     order_value=order_value_float,
 )
 scored_point_multivector = grpc.ScoredPoint(
     id=point_id,
@@ -479,13 +506,11 @@
 )

 search_params_2 = grpc.SearchParams(
-    exact=True,
-    indexed_only=True,
+    exact=True, indexed_only=True, acorn=grpc.AcornSearchParams(enable=True, max_selectivity=2.0)
 )

 search_params_3 = grpc.SearchParams(
-    exact=True,
-    quantization=quantization_search_params,
+    exact=True, quantization=quantization_search_params, acorn=grpc.AcornSearchParams(enable=True)
 )

 rename_alias = grpc.RenameAlias(old_alias_name="col2", new_alias_name="col3")
@@ -514,12 +539,14 @@
     lowercase=True,
     min_token_len=2,
     max_token_len=10,
+    ascii_folding=False,
 )

 text_index_params_2 = grpc.TextIndexParams(
     tokenizer=grpc.TokenizerType.Whitespace,
     lowercase=False,
     max_token_len=10,
+    ascii_folding=True,
 )

 text_index_params_3 = grpc.TextIndexParams(
@@ -677,7 +704,6 @@
 collection_info_ok = grpc.CollectionInfo(
     status=collection_status_green,
     optimizer_status=optimizer_status,
-    vectors_count=100000,
     points_count=100000,
     segments_count=6,
     config=collection_config,
@@ -714,7 +740,6 @@
 collection_info = grpc.CollectionInfo(
     status=collection_status,
     optimizer_status=optimizer_status_error,
-    vectors_count=100000,
     points_count=100000,
     segments_count=6,
     config=collection_config,
@@ -750,7 +775,6 @@
 collection_info_red = grpc.CollectionInfo(
     status=collection_status_error,
     optimizer_status=optimizer_status_error,
-    vectors_count=100000,
     points_count=100000,
     segments_count=6,
     config=collection_config,
@@ -847,7 +871,7 @@
 point_struct = grpc.PointStruct(
     id=point_id_1,
-    vectors=grpc.Vectors(vector=grpc.Vector(data=[1.0, 2.0, -1.0, -0.2])),
+    vectors=grpc.Vectors(vector=grpc.Vector(dense=grpc.DenseVector(data=[1.0, 2.0, -1.0, -0.2]))),
     payload=payload_to_grpc({"my_payload": payload_value}),
 )

@@ -872,12 +896,19 @@
 many_vectors = grpc.Vectors(
     vectors=grpc.NamedVectors(
         vectors={
-            "image": grpc.Vector(data=[1.0, 2.0, -1.0, -0.2]),
-            "text": grpc.Vector(data=[1.0, 2.0, -1.0, -0.2]),
+            "image": grpc.Vector(dense=grpc.DenseVector(data=[1.0, 2.0, -1.0, -0.2])),
+            "text": grpc.Vector(dense=grpc.DenseVector(data=[1.0, 2.0, -1.0, -0.2])),
             "sparse": grpc.Vector(
-                data=[1.0, 2.0, -1.0, -0.2], indices=SparseIndices(data=[1, 2, 3])
+                sparse=grpc.SparseVector(values=[1.0, 2.0, -1.0, -0.2], indices=[1, 2, 3])
+            ),
+            "multi": grpc.Vector(
+                multi_dense=grpc.MultiDenseVector(
+                    vectors=[
+                        grpc.DenseVector(data=[1.0, 2.0, 3.0, 4.0]),
+                        grpc.DenseVector(data=[-1.0, -2.0, -3.0, -4.0]),
+                    ]
+                )
             ),
-            "multi": grpc.Vector(data=[1.0, 2.0, 3.0, 4.0], vectors_count=2),
             "doc_raw": grpc.Vector(document=document_with_options),
             "image_raw": grpc.Vector(image=image_with_options),
             "obj_raw": grpc.Vector(object=inference_object_with_options),
@@ -947,6 +978,7 @@
     quantization_config=quantization_config_diff_product,
     params=collections_params_diff,
     vectors_config=vector_config_diff_map,
+    metadata={"new_metadata": grpc.Value(integer_value=2)},
 )

 points_ids_list = grpc.PointsIdsList(ids=[point_id, point_id_2, point_id_2])
@@ -1016,33 +1048,11 @@
     ]
 )

-search_points = grpc.SearchPoints(
-    collection_name="collection-123",
-    vector=[1.0, 2.0, 3.0, 5.0],
-    filter=filter_,
-    limit=100,
-    with_payload=with_payload_bool,
-    params=search_params,
-    score_threshold=0.123,
-    offset=10,
-    vector_name="abc",
-    with_vectors=grpc.WithVectorsSelector(include=grpc.VectorsSelector(names=["abc", "def"])),
-    shard_key_selector=shard_key_selector,
-    sparse_indices=grpc.SparseIndices(data=[1, 2, 3]),
-)
-
-search_points_all_vectors = grpc.SearchPoints(
-    collection_name="collection-123",
-    vector=[1.0, 2.0, 3.0, 5.0],
-    filter=filter_,
-    limit=100,
-    with_payload=with_payload_bool,
-    params=search_params,
-    score_threshold=0.123,
-    offset=10,
-    vector_name="abc",
-    with_vectors=grpc.WithVectorsSelector(enable=True),
-    shard_key_selector=shard_key_selector_2,
+shard_key_selector_3 = grpc.ShardKeySelector(
+    shard_keys=[
+        grpc.ShardKey(number=123),
+    ],
+    fallback=grpc.ShardKey(keyword="abc"),
 )

 lookup_location_1 = grpc.LookupLocation(
@@ -1067,58 +1077,12 @@
     offset=10,
     using="abc",
     with_vectors=grpc.WithVectorsSelector(include=grpc.VectorsSelector(names=["abc", "def"])),
-    shard_key_selector=shard_key_selector,
+    shard_key_selector=shard_key_selector_3,
 )

 recommend_strategy = grpc.RecommendStrategy.BestScore
 recommend_strategy2 = grpc.RecommendStrategy.AverageVector
 recommend_strategy3 = grpc.RecommendStrategy.SumScores
-
-recommend_points = grpc.RecommendPoints(
-    collection_name="collection-123",
-    positive=[point_id_1, point_id_2],
-    negative=[point_id],
-    filter=filter_,
-    limit=100,
-    with_payload=with_payload_bool,
-    params=search_params,
-    score_threshold=0.123,
-    offset=10,
-    using="abc",
-    with_vectors=grpc.WithVectorsSelector(enable=True),
-    strategy=recommend_strategy,
-    positive_vectors=[
-        grpc.Vector(data=[1.0, 2.0, -1.0, -0.2]),
-        grpc.Vector(data=[2.0, 2.0, -1.0, -0.2]),
-    ],
-    negative_vectors=[
-        grpc.Vector(data=[3.0, 2.0, -1.0, -0.2]),
-    ],
-    shard_key_selector=shard_key_selector_2,
-    lookup_from=lookup_location_1,
-)
-legacy_sparse_vector = grpc.Vector(
-    data=[0.2, 0.3, 0.4],
-    indices=SparseIndices(data=[1, 2, 3]),
-)
-recommend_points_sparse = grpc.RecommendPoints(
-    collection_name="collection-123",
-    positive=[point_id_1, point_id_2],
-    negative=[point_id],
-    filter=filter_,
-    limit=100,
-    with_payload=with_payload_bool,
-    params=search_params,
-    score_threshold=0.123,
-    offset=10,
-    using="abc",
-    with_vectors=grpc.WithVectorsSelector(enable=True),
-    strategy=recommend_strategy,
-    positive_vectors=[legacy_sparse_vector],
-    negative_vectors=[legacy_sparse_vector],
-    shard_key_selector=shard_key_selector_2,
-)
-
 read_consistency = grpc.ReadConsistency(
     factor=1,
 )
@@ -1149,7 +1113,7 @@

 point_vector_1 = grpc.PointVectors(
     id=point_id_1,
-    vectors=single_vector,
+    vectors=single_dense_vector,
 )

 point_vector_2 = grpc.PointVectors(
@@ -1157,11 +1121,9 @@
     vectors=many_vectors,
 )

-point_vector_3 = grpc.PointVectors(id=point_id_1, vectors=single_dense_vector)
-
-point_vector_4 = grpc.PointVectors(id=point_id_1, vectors=single_sparse_vector)
+point_vector_3 = grpc.PointVectors(id=point_id_1, vectors=single_sparse_vector)

-point_vector_5 = grpc.PointVectors(id=point_id_1, vectors=single_multidense_vector)
+point_vector_4 = grpc.PointVectors(id=point_id_1, vectors=single_multidense_vector)

 group_id_1 = grpc.GroupId(unsigned_value=123)
 group_id_2 = grpc.GroupId(integer_value=-456)
@@ -1188,71 +1150,6 @@
     with_payload=with_payload_include,
 )

-vector_example_1 = grpc.VectorExample(
-    vector=grpc.Vector(data=[1.0, 2.0, 3.0, 5.0]),
-)
-
-vector_example_2 = grpc.VectorExample(
-    id=point_id_1,
-)
-
-vector_example_3 = grpc.VectorExample(
-    vector=grpc.Vector(
-        data=[1.0, 2.0, 3.0, 5.0],
-        indices=SparseIndices(data=[1, 2, 3, 4]),
-    ),
-    id=point_id_1,
-)
-
-target_vector_1 = grpc.TargetVector(
-    single=vector_example_1,
-)
-
-context_example_pair_1 = grpc.ContextExamplePair(
-    positive=vector_example_1,
-    negative=vector_example_2,
-)
-
-discover_points = grpc.DiscoverPoints(
-    collection_name="collection-123",
-    target=target_vector_1,
-    context=[context_example_pair_1, context_example_pair_1],
-    filter=filter_,
-    limit=100,
-    with_payload=with_payload_bool,
-    params=search_params,
-    offset=10,
-    using="abc",
-    with_vectors=grpc.WithVectorsSelector(enable=True),
-    shard_key_selector=shard_key_selector_2,
-)
-
-sparse_vector_example = grpc.VectorExample(
-    vector=legacy_sparse_vector,
-)
-target_vector_sparse = grpc.TargetVector(
-    single=sparse_vector_example,
-)
-
-context_example_pair_sparse = grpc.ContextExamplePair(
-    positive=sparse_vector_example,
-    negative=sparse_vector_example,
-)
-discover_points_sparse = grpc.DiscoverPoints(
-    collection_name="collection-123",
-    target=target_vector_sparse,
-    context=[context_example_pair_sparse, context_example_pair_sparse],
-    filter=filter_,
-    limit=100,
-    with_payload=with_payload_bool,
-    params=search_params,
-    offset=10,
-    using="abc",
-    with_vectors=grpc.WithVectorsSelector(enable=True),
-    shard_key_selector=shard_key_selector_2,
-    lookup_from=lookup_location_1,
-)
-
 upsert_operation = grpc.PointsUpdateOperation(
     upsert=grpc.PointsUpdateOperation.PointStructList(
         points=[point_struct],
@@ -1494,6 +1391,9 @@
 query_nearest_with_mmr_default = grpc.Query(
     nearest_with_mmr=grpc.NearestInputWithMmr(nearest=vector_input_dense, mmr=mmr_default)
 )
+query_rrf = grpc.Query(rrf=grpc.Rrf(k=3))
+query_rrf_default = grpc.Query(rrf=grpc.Rrf())
+query_rrf_explicit_none = grpc.Query(rrf=grpc.Rrf(k=None))

 deep_prefetch_query = grpc.PrefetchQuery(query=query_recommend)
 prefetch_query = grpc.PrefetchQuery(
@@ -1550,10 +1450,20 @@
     ids=[point_id_1, point_id_2],
 )

+replica_state_active = grpc.ReplicaState.Active
+replica_state_dead = grpc.ReplicaState.Dead
+replica_state_partial = grpc.ReplicaState.Partial
+replica_state_initializing = grpc.ReplicaState.Initializing
+replica_state_listener = grpc.ReplicaState.Listener
+replica_state_partial_snapshot = grpc.ReplicaState.PartialSnapshot
+replica_state_recovery = grpc.ReplicaState.Recovery
+replica_state_resharding = grpc.ReplicaState.Resharding
+replica_state_resharding_scale_down = grpc.ReplicaState.ReshardingScaleDown
+replica_state_active_read = grpc.ReplicaState.ActiveRead

 fixtures = {
     "CollectionParams": [collection_params, collection_params_2],
-    "CollectionConfig": [collection_config],
+    "CollectionConfig": [collection_config, collection_config_w_metadata],
     "ScoredPoint": [
         scored_point,
         scored_point_order_value_int,
@@ -1635,9 +1545,7 @@
         vector_param_with_multivector,
     ],
     "VectorsConfig": [single_vector_config, vector_config],
-    "SearchPoints": [search_points, search_points_all_vectors],
     "QueryPoints": [query_points],
-    "RecommendPoints": [recommend_points, recommend_points_sparse],
     "RecommendStrategy": [recommend_strategy, recommend_strategy2, recommend_strategy3],
     "TextIndexParams": [
         text_index_params_1,
@@ -1673,9 +1581,8 @@
     "PointVectors": [
         point_vector_1,
         point_vector_2,
-        # point_vector_3,  # todo: uncomment as of 1.14.0
-        # point_vector_4,  # todo: uncomment as of 1.14.0
-        # point_vector_5,  # todo: uncomment as of 1.14.0
+        point_vector_3,
+        point_vector_4,
     ],
     "GroupId": [group_id_1, group_id_2, group_id_3],
     "GroupsResult": [group_result],
@@ -1696,10 +1603,6 @@
         delete_vectors_operation,
         delete_vectors_operation_2,
     ],
-    "DiscoverPoints": [discover_points, discover_points_sparse],
-    "ContextExamplePair": [context_example_pair_1],
-    "VectorExample": [vector_example_1, vector_example_2, vector_example_3],
-    "TargetVector": [target_vector_1],
     "SparseVectorParams": [sparse_vector_params, sparse_vector_params_datatype],
     "SparseVectorConfig": [sparse_vector_config],
     "ShardKeySelector": [shard_key_selector, shard_key_selector_2],
@@ -1720,6 +1623,9 @@
         query_formula,
         query_nearest_with_mmr,
         query_nearest_with_mmr_default,
+        query_rrf,
+        query_rrf_default,
+        query_rrf_explicit_none,
     ],
     "FacetValueHit": [facet_string_hit, facet_integer_hit],
     "PrefetchQuery": [deep_prefetch_query, prefetch_query, prefetch_full_query, prefetch_many],
@@ -1727,6 +1633,18 @@
     "SearchMatrixPairs": [search_matrix_pairs],
     "SearchMatrixOffsets": [search_matrix_offsets],
     "StrictModeConfig": [strict_mode_config, strict_mode_config_empty],
+    "ReplicaState": [
+        replica_state_active,
+        replica_state_dead,
+        replica_state_partial,
+        replica_state_initializing,
+        replica_state_listener,
+        replica_state_partial_snapshot,
+        replica_state_recovery,
+        replica_state_resharding,
+        replica_state_resharding_scale_down,
+        replica_state_active_read,
+    ],
 }
diff --git a/tests/conversions/test_validate_conversions.py b/tests/conversions/test_validate_conversions.py
index 22b4f9b9..32965099 100644
--- a/tests/conversions/test_validate_conversions.py
+++ b/tests/conversions/test_validate_conversions.py
@@ -115,39 +115,51 @@ def test_vector_batch_conversion():
     batch = [[]]
     res = RestToGrpc.convert_batch_vector_struct(batch, 1)
     assert len(res) == 1
-    assert res == [grpc.Vectors(vector=grpc.Vector(data=[]))]
+    assert res == [grpc.Vectors(vector=grpc.Vector(dense=grpc.DenseVector(data=[])))]

     batch = [[1, 2, 3]]
     res = RestToGrpc.convert_batch_vector_struct(batch, 1)
     assert len(res) == 1
-    assert res == [grpc.Vectors(vector=grpc.Vector(data=[1, 2, 3]))]
+    assert res == [grpc.Vectors(vector=grpc.Vector(dense=grpc.DenseVector(data=[1, 2, 3])))]

     batch = [[1, 2, 3]]
     res = RestToGrpc.convert_batch_vector_struct(batch, 1)
     assert len(res) == 1
-    assert res == [grpc.Vectors(vector=grpc.Vector(data=[1, 2, 3]))]
+    assert res == [grpc.Vectors(vector=grpc.Vector(dense=grpc.DenseVector(data=[1, 2, 3])))]

     batch = [[1, 2, 3], [3, 4, 5]]
     res = RestToGrpc.convert_batch_vector_struct(batch, 0)
     assert len(res) == 2
     assert res == [
-        grpc.Vectors(vector=grpc.Vector(data=[1, 2, 3])),
-        grpc.Vectors(vector=grpc.Vector(data=[3, 4, 5])),
+        grpc.Vectors(vector=grpc.Vector(dense=grpc.DenseVector(data=[1, 2, 3]))),
+        grpc.Vectors(vector=grpc.Vector(dense=grpc.DenseVector(data=[3, 4, 5]))),
     ]

     batch = {"image": [[1, 2, 3]]}
     res = RestToGrpc.convert_batch_vector_struct(batch, 1)
     assert len(res) == 1
     assert res == [
-        grpc.Vectors(vectors=grpc.NamedVectors(vectors={"image": grpc.Vector(data=[1, 2, 3])}))
+        grpc.Vectors(
+            vectors=grpc.NamedVectors(
+                vectors={"image": grpc.Vector(dense=grpc.DenseVector(data=[1, 2, 3]))}
+            )
+        )
     ]

     batch = {"image": [[1, 2, 3], [3, 4, 5]]}
     res = RestToGrpc.convert_batch_vector_struct(batch, 2)
     assert len(res) == 2
     assert res == [
-        grpc.Vectors(vectors=grpc.NamedVectors(vectors={"image": grpc.Vector(data=[1, 2, 3])})),
-        grpc.Vectors(vectors=grpc.NamedVectors(vectors={"image": grpc.Vector(data=[3, 4, 5])})),
+        grpc.Vectors(
+            vectors=grpc.NamedVectors(
+                vectors={"image": grpc.Vector(dense=grpc.DenseVector(data=[1, 2, 3]))}
+            )
+        ),
+        grpc.Vectors(
+            vectors=grpc.NamedVectors(
+                vectors={"image": grpc.Vector(dense=grpc.DenseVector(data=[3, 4, 5]))}
+            )
+        ),
     ]

     batch = {"image": [[1, 2, 3], [3, 4, 5]], "restaurants": [[6, 7, 8], [9, 10, 11]]}
@@ -157,38 +169,25 @@ def test_vector_batch_conversion():
         grpc.Vectors(
             vectors=grpc.NamedVectors(
                 vectors={
-                    "image": grpc.Vector(data=[1, 2, 3]),
-                    "restaurants": grpc.Vector(data=[6, 7, 8]),
+                    "image": grpc.Vector(dense=grpc.DenseVector(data=[1, 2, 3])),
+                    "restaurants": grpc.Vector(dense=grpc.DenseVector(data=[6, 7, 8])),
                 }
             )
         ),
         grpc.Vectors(
             vectors=grpc.NamedVectors(
                 vectors={
-                    "image": grpc.Vector(data=[3, 4, 5]),
-                    "restaurants": grpc.Vector(data=[9, 10, 11]),
+                    "image": grpc.Vector(dense=grpc.DenseVector(data=[3, 4, 5])),
+                    "restaurants": grpc.Vector(dense=grpc.DenseVector(data=[9, 10, 11])),
                 }
             )
         ),
     ]

-def test_sparse_vector_conversion():
-    from qdrant_client import grpc
-    from qdrant_client.conversions.conversion import GrpcToRest, RestToGrpc
-
-    sparse_vector = grpc.Vector(data=[0.2, 0.3, 0.4], indices=grpc.SparseIndices(data=[3, 2, 5]))
-    recovered = RestToGrpc.convert_sparse_vector_to_vector(
-        GrpcToRest.convert_vector(sparse_vector)
-    )
-
-    assert sparse_vector == recovered
-
-
 def test_sparse_vector_batch_conversion():
     from qdrant_client import grpc
     from qdrant_client.conversions.conversion import RestToGrpc
-    from qdrant_client.grpc import SparseIndices
     from qdrant_client.http.models import SparseVector

     batch = {"image": [SparseVector(values=[1.5, 2.4, 8.1], indices=[10, 20, 30])]}
@@ -199,7 +198,7 @@ def test_sparse_vector_batch_conversion():
             vectors=grpc.NamedVectors(
                 vectors={
                     "image": grpc.Vector(
-                        data=[1.5, 2.4, 8.1], indices=SparseIndices(data=[10, 20, 30])
+                        sparse=grpc.SparseVector(values=[1.5, 2.4, 8.1], indices=[10, 20, 30])
                     )
                 }
             )
@@ -219,7 +218,7 @@
             vectors=grpc.NamedVectors(
                 vectors={
                     "image": grpc.Vector(
-                        data=[1.5, 2.4, 8.1], indices=SparseIndices(data=[10, 20, 30])
+                        sparse=grpc.SparseVector(values=[1.5, 2.4, 8.1], indices=[10, 20, 30])
                     )
                 }
             )
@@ -228,7 +227,7 @@
             vectors=grpc.NamedVectors(
                 vectors={
                     "image": grpc.Vector(
-                        data=[7.8, 3.2, 9.5], indices=SparseIndices(data=[100, 200, 300])
+                        sparse=grpc.SparseVector(values=[7.8, 3.2, 9.5], indices=[100, 200, 300])
                     )
                 }
             )
@@ -258,14 +257,6 @@ def test_grpc_payload_scheme_conversion():
     )

-def test_init_from_conversion():
-    from qdrant_client.conversions.conversion import GrpcToRest, RestToGrpc
-
-    init_from = "collection_name"
-    recovered = RestToGrpc.convert_init_from(GrpcToRest.convert_init_from(init_from))
-    assert init_from == recovered
-
-
 @pytest.mark.parametrize(
     "dt",
     [
@@ -379,66 +370,6 @@ def test_query_points():
     assert recovered.prefetch[0] == query_request.prefetch

-def test_extended_vectors():
-    # todo: test in fixtures.py from v1.14.0
-    import numpy as np
-
-    from qdrant_client import grpc, models
-    from qdrant_client.conversions.conversion import GrpcToRest
-
-    # region grpc.Vector
-    dense_vector = grpc.Vector(dense=grpc.DenseVector(data=[0.2, 0.3, 0.4]))
-    sparse_vector = grpc.Vector(
-        sparse=grpc.SparseVector(values=[0.1, 0.2, 0.3], indices=[1, 42, 240])
-    )
-    multi_dense_vector = grpc.Vector(
-        multi_dense=grpc.MultiDenseVector(
-            vectors=[
-                grpc.DenseVector(data=[0.1, 0.3, 0.5]),
-                grpc.DenseVector(data=[0.2, 0.4, 0.6]),
-            ]
-        )
-    )
-
-    rest_dense_vector = GrpcToRest.convert_vector(dense_vector)
-    rest_sparse_vector = GrpcToRest.convert_vector(sparse_vector)
-    rest_multi_dense_vector = GrpcToRest.convert_vector(multi_dense_vector)
-    assert np.allclose(rest_dense_vector, [0.2, 0.3, 0.4])
-    assert (
-        isinstance(rest_sparse_vector, models.SparseVector)
-        and np.allclose(rest_sparse_vector.values, [0.1, 0.2, 0.3])
-        and rest_sparse_vector.indices == [1, 42, 240]
-    )
-    assert np.allclose(rest_multi_dense_vector, np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]))
-    # endregion
-
-    # region grpc.VectorOutput
-    dense_vector_output = grpc.VectorOutput(dense=grpc.DenseVector(data=[0.2, 0.3, 0.4]))
-    sparse_vector_output = grpc.VectorOutput(
-        sparse=grpc.SparseVector(values=[0.1, 0.2, 0.3], indices=[1, 42, 240])
-    )
-    multi_dense_vector_output = grpc.VectorOutput(
-        multi_dense=grpc.MultiDenseVector(
-            vectors=[
-                grpc.DenseVector(data=[0.1, 0.3, 0.5]),
-                grpc.DenseVector(data=[0.2, 0.4, 0.6]),
-            ]
-        )
-    )
-
-    rest_dense_vector = GrpcToRest.convert_vector_output(dense_vector_output)
-    rest_sparse_vector = GrpcToRest.convert_vector_output(sparse_vector_output)
-    rest_multi_dense_vector = GrpcToRest.convert_vector_output(multi_dense_vector_output)
-    assert np.allclose(rest_dense_vector, [0.2, 0.3, 0.4])
-    assert (
-        isinstance(rest_sparse_vector, models.SparseVector)
-        and np.allclose(rest_sparse_vector.values, [0.1, 0.2, 0.3])
-        and rest_sparse_vector.indices == [1, 42, 240]
-    )
-    assert np.allclose(rest_multi_dense_vector, np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]]))
-    # endregion
-
-
 def test_convert_text_index_params_stopwords():
     from qdrant_client import models
     from qdrant_client.conversions.conversion import GrpcToRest, RestToGrpc
@@ -524,3 +455,80 @@ def test_inference_without_options():
     assert recovered_doc_wo_options.options == {}
     assert recovered_image_wo_options.options == {}
     assert recovered_inference_wo_options.options == {}
+
+
+def test_convert_shard_key_with_fallback():
+    from qdrant_client import models, grpc as q_grpc
+    from qdrant_client.conversions.conversion import GrpcToRest, RestToGrpc
+
+    single_int_shard_key = 2
+    single_str_shard_key = "abc"
+    shard_keys = [2, "qwerty"]
+    shard_key_with_int_fallback = models.ShardKeyWithFallback(target="123", fallback=3)
+    shard_key_with_str_fallback = models.ShardKeyWithFallback(target=123, fallback="zxc")
+
+    for key in (
+        single_int_shard_key,
+        single_str_shard_key,
+        shard_keys,
+        shard_key_with_int_fallback,
+        shard_key_with_str_fallback,
+    ):
+        grpc_key = RestToGrpc.convert_shard_key_selector(key)
+        restored_key = GrpcToRest.convert_shard_key_selector(grpc_key)
+        assert restored_key == key
+
+    single_int_shard_key_list = [3]
+    single_str_shard_key_list = ["abc"]
+    for keys in single_int_shard_key_list, single_str_shard_key_list:
+        grpc_keys = RestToGrpc.convert_shard_key_selector(keys)
+        restored_key = GrpcToRest.convert_shard_key_selector(grpc_keys)
+        assert keys[0] == restored_key
+
+    invalid_grpc_fallback_shard_key = q_grpc.ShardKeySelector(
+        shard_keys=[q_grpc.ShardKey(number=3), q_grpc.ShardKey(number=2)],
+        fallback=q_grpc.ShardKey(number=2),
+    )
+
+    with pytest.raises(ValueError):
+        GrpcToRest.convert_shard_key_selector(invalid_grpc_fallback_shard_key)
+
+
+def test_legacy_vector():
+    from qdrant_client import grpc as q_grpc
+    from qdrant_client.conversions.conversion import GrpcToRest, RestToGrpc
+
+    legacy_sparse_vector = q_grpc.Vector(
+        data=[0.2, 0.3, 0.4],
+        indices=q_grpc.SparseIndices(data=[1, 2, 3]),
+    )
+
+    rest_sparse_vector = GrpcToRest.convert_vector(legacy_sparse_vector)
+    restored_sparse_vector = RestToGrpc.convert_sparse_vector_to_vector(rest_sparse_vector)
+
+    assert restored_sparse_vector == q_grpc.Vector(
+        sparse=q_grpc.SparseVector(
+            values=legacy_sparse_vector.data, indices=legacy_sparse_vector.indices.data
+        )
+    )
+
+    legacy_dense_vector = q_grpc.Vector(data=[1.0, 2.0])
+    rest_dense_vector = GrpcToRest.convert_vector(legacy_dense_vector)
+    restored_dense_vector = RestToGrpc.convert_vector_struct(rest_dense_vector)
+
+    assert restored_dense_vector.vector == q_grpc.Vector(
+        dense=q_grpc.DenseVector(data=legacy_dense_vector.data)
+    )
+
+    legacy_multi_dense_vector = q_grpc.Vector(data=[1.0, 2.0, 3.0, 4.0], vectors_count=2)
+    rest_multidense_vector = GrpcToRest.convert_vector(legacy_multi_dense_vector)
+    restored_multi_dense_vector = RestToGrpc.convert_vector_struct(rest_multidense_vector)
+
+    assert restored_multi_dense_vector.vector == q_grpc.Vector(
+        multi_dense=q_grpc.MultiDenseVector(
+            vectors=[
+                q_grpc.DenseVector(data=legacy_multi_dense_vector.data[:2]),
+                q_grpc.DenseVector(data=legacy_multi_dense_vector.data[2:]),
+            ]
+        )
+    )
diff --git a/tests/fixtures/filters.py b/tests/fixtures/filters.py
index dd94e0be..8bfcc7d6 100644
--- a/tests/fixtures/filters.py
+++ b/tests/fixtures/filters.py
@@ -3,7 +3,12 @@
 from typing import Union

 from qdrant_client.http import models
-from tests.fixtures.payload import geo_points, random_real_word, random_signed_int
+from tests.fixtures.payload import (
+    geo_points,
+    random_real_word,
+    random_signed_int,
+    random_real_words,
+)

 """
 data structure:
@@ -56,6 +61,7 @@
     - text
     - number
     - any(in)
+    - text any
     - range
     - geo_bounding_box
     - geo_radius
@@ -156,6 +162,12 @@ def match_text_field_condition() -> models.FieldCondition:
     )

+def match_text_any_field_condition() -> models.FieldCondition:
+    field = "words"
+    text_any = random_real_words()
+    return models.FieldCondition(key=field, match=models.MatchTextAny(text_any=text_any))
+
+
 def match_any_field_condition() -> models.FieldCondition:
     field = "id_str"
     any_vals = [str(random.randint(1, 30)).zfill(2) for _ in range(3)]
@@ -294,6 +306,7 @@ def one_random_condition_please() -> models.Condition:
         match_value_field_condition,
         match_text_field_condition,
         match_any_field_condition,
+        match_text_any_field_condition,
         match_except_field_condition,
         range_field_condition,
         datetime_range_field_condition,
diff --git a/tests/fixtures/payload.py b/tests/fixtures/payload.py
index c431504e..b84fcead 100644
--- a/tests/fixtures/payload.py
+++ b/tests/fixtures/payload.py
@@ -177,6 +177,10 @@ def random_real_word():
     return random.choice(random_words)

+def random_real_words() -> str:
+    return " ".join([random_real_word() for _ in range(random.randint(1, 3))])
+
+
 def random_city():
     name = random.choice(list(geo_points.keys()))
     return {"name": name, "geo": geo_points[name]}
diff --git a/tests/test_async_qdrant_client.py b/tests/test_async_qdrant_client.py
index 29a39e9c..4096aa59 100644
--- a/tests/test_async_qdrant_client.py
+++ b/tests/test_async_qdrant_client.py
@@ -38,8 +38,8 @@ async def test_async_qdrant_client(prefer_grpc):
     await client.get_collection(COLLECTION_NAME)
     await client.get_collections()

-    if dev or None in (major, minor, patch) or (major, minor, patch) >= (1, 8, 0):
-        await client.collection_exists(COLLECTION_NAME)
+
+    await client.collection_exists(COLLECTION_NAME)

     await client.update_collection(
         COLLECTION_NAME, hnsw_config=models.HnswConfigDiff(m=32, ef_construct=120)
@@ -81,21 +81,23 @@ async def test_async_qdrant_client(prefer_grpc):

     assert (
         len(
-            await client.search(
-                COLLECTION_NAME,
-                query_vector=np.random.rand(10).tolist(),  # type: ignore
-                limit=10,
-            )
+            (
+                await client.query_points(
+                    COLLECTION_NAME,
+                    query=np.random.rand(10).tolist(),  # type: ignore
+                    limit=10,
+                )
+            ).points
         )
         == 10
     )

     assert (
         len(
-            await client.search_batch(
+            await client.query_batch_points(
                 COLLECTION_NAME,
                 requests=[
-                    models.SearchRequest(vector=np.random.rand(10).tolist(), limit=10)
+                    models.QueryRequest(query=np.random.rand(10).tolist(), limit=10)
                     for _ in range(3)
                 ],
             )
@@ -106,9 +108,9 @@
     assert (
         len(
             (
-                await client.search_groups(
+                await client.query_points_groups(
                     COLLECTION_NAME,
-                    query_vector=np.random.rand(10).tolist(),  # type: ignore
+                    query=np.random.rand(10).tolist(),  # type: ignore
                     limit=4,
                     group_by="random_dig",
                 )
@@ -117,12 +119,26 @@
         == 4
     )

-    assert len(await client.recommend(COLLECTION_NAME, positive=[0], limit=5)) == 5
     assert (
         len(
             (
-                await client.recommend_groups(
-                    COLLECTION_NAME, positive=[1], group_by="random_dig", limit=6
+                await client.query_points(
+                    COLLECTION_NAME,
+                    query=models.RecommendQuery(recommend=models.RecommendInput(positive=[0])),
+                    limit=5,
+                )
+            ).points
+        )
+        == 5
+    )
+    assert (
+        len(
+            (
+                await client.query_points_groups(
+                    COLLECTION_NAME,
+                    query=models.RecommendQuery(recommend=models.RecommendInput(positive=[1])),
+                    group_by="random_dig",
+                    limit=6,
                 )
             ).groups
         )
@@ -131,29 +147,22 @@
     assert (
         len(
             (
-                await client.recommend_batch(
+                await client.query_batch_points(
                     COLLECTION_NAME,
-                    requests=[models.RecommendRequest(positive=[2], limit=7)],
+                    requests=[
+                        models.QueryRequest(
+                            query=models.RecommendQuery(
+                                recommend=models.RecommendInput(positive=[2])
+                            ),
+                            limit=7,
+                        )
+                    ],
                 )
-            )[0]
+            )[0].points
         )
         == 7
     )

-    if dev or None in (major, minor, patch) or (major, minor, patch) >= (1, 10, 0):
-        assert (
-            len(
-                (
-                    await client.query_points(COLLECTION_NAME, query=np.random.rand(10).tolist())
-                ).points
-            )
-            == 10
-        )
-        query_responses = await client.query_batch_points(
-            COLLECTION_NAME, requests=[models.QueryRequest(query=np.random.rand(10).tolist())]
-        )
-        assert len(query_responses) == 1 and len(query_responses[0].points) == 10
-
     assert len(await client.retrieve(COLLECTION_NAME, ids=[3, 5])) == 2

     await client.create_payload_index(
@@ -166,11 +175,6 @@
     await client.delete_payload_index(COLLECTION_NAME, field_name="random_dig")
     assert "random_dig" not in (await client.get_collection(COLLECTION_NAME)).payload_schema

-    assert not (await client.lock_storage(reason="test")).write
-    assert (await client.get_locks()).write
-    assert (await client.unlock_storage()).write
-    assert not (await client.get_locks()).write
-
     assert isinstance(await client.create_snapshot(COLLECTION_NAME), models.SnapshotDescription)
     snapshots = await client.list_snapshots(COLLECTION_NAME)
     assert len(snapshots) == 1
@@ -323,21 +327,23 @@ async def test_async_qdrant_client_local():

     assert (
         len(
-            await client.search(
-                COLLECTION_NAME,
-                query_vector=np.random.rand(10).tolist(),  # type: ignore
-                limit=10,
-            )
+            (
+                await client.query_points(
+                    COLLECTION_NAME,
+                    query=np.random.rand(10).tolist(),  # type: ignore
+                    limit=10,
+                )
+            ).points
         )
         == 10
     )

     assert (
         len(
-            await client.search_batch(
+            await client.query_batch_points(
                 COLLECTION_NAME,
                 requests=[
-                    models.SearchRequest(vector=np.random.rand(10).tolist(), limit=10)
+                    models.QueryRequest(query=np.random.rand(10).tolist(), limit=10)
                     for _ in range(3)
                 ],
             )
@@ -348,9 +354,9 @@
     assert (
         len(
             (
-                await client.search_groups(
+                await client.query_points_groups(
                     COLLECTION_NAME,
-                    query_vector=np.random.rand(10).tolist(),  # type: ignore
+                    query=np.random.rand(10).tolist(),  # type: ignore
                     limit=4,
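                     # query_points_groups supersedes the removed search_groups call:
                     # query= replaces query_vector=, while group_by/limit and the
                     # .groups result shape keep their previous meaning.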
group_by="random_dig", ) @@ -359,26 +365,26 @@ async def test_async_qdrant_client_local(): == 4 ) - if dev or None in (major, minor, patch) or (major, minor, patch) >= (1, 10, 0): - assert ( - len( - ( - await client.query_points(COLLECTION_NAME, query=np.random.rand(10).tolist()) - ).points - ) - == 10 - ) - query_responses = await client.query_batch_points( - COLLECTION_NAME, requests=[models.QueryRequest(query=np.random.rand(10).tolist())] + assert ( + len( + ( + await client.query_points( + COLLECTION_NAME, + query=models.RecommendQuery(recommend=models.RecommendInput(positive=[0])), + limit=5, + ) + ).points ) - assert len(query_responses) == 1 and len(query_responses[0].points) == 10 - - assert len(await client.recommend(COLLECTION_NAME, positive=[0], limit=5)) == 5 + == 5 + ) assert ( len( ( - await client.recommend_groups( - COLLECTION_NAME, positive=[1], group_by="random_dig", limit=6 + await client.query_points_groups( + COLLECTION_NAME, + query=models.RecommendQuery(recommend=models.RecommendInput(positive=[1])), + group_by="random_dig", + limit=6, ) ).groups ) @@ -387,11 +393,18 @@ async def test_async_qdrant_client_local(): assert ( len( ( - await client.recommend_batch( + await client.query_batch_points( COLLECTION_NAME, - requests=[models.RecommendRequest(positive=[2], limit=7)], + requests=[ + models.QueryRequest( + query=models.RecommendQuery( + recommend=models.RecommendInput(positive=[2]) + ), + limit=7, + ) + ], ) - )[0] + )[0].points ) == 7 ) @@ -406,8 +419,6 @@ async def test_async_qdrant_client_local(): await client.delete_payload_index(COLLECTION_NAME, field_name="random_dig") - assert await client.get_locks() - assert len(await client.list_snapshots(COLLECTION_NAME)) == 0 assert len(await client.list_full_snapshots()) == 0 @@ -499,7 +510,7 @@ async def async_auth_token_provider(): await client.get_collections() assert token == "token_1" - await client.unlock_storage() + await client.get_collections() assert token == "token_2" sync_token = "" @@ -555,7 +566,7 @@ def auth_token_provider(): await client.get_collections() assert sync_token == "token_1" - await client.unlock_storage() + await client.get_collections() assert sync_token == "token_2" @@ -578,4 +589,4 @@ async def test_custom_sharding(prefer_grpc): collection_info = await client.get_collection(COLLECTION_NAME) assert collection_info.config.params.shard_number == 1 - # assert collection_info.config.params.sharding_method == models.ShardingMethod.CUSTOM # todo: fix in grpc + assert collection_info.config.params.sharding_method == models.ShardingMethod.CUSTOM diff --git a/tests/test_in_memory.py b/tests/test_in_memory.py index 2b96f4d9..15c70766 100644 --- a/tests/test_in_memory.py +++ b/tests/test_in_memory.py @@ -42,14 +42,14 @@ def test_dense_in_memory_key_filter_returns_results(qdrant: QdrantClient): assert operation_info.operation_id == 0 assert operation_info.status == models.UpdateStatus.COMPLETED - search_result = qdrant.search( + search_result = qdrant.query_points( collection_name="test_collection", - query_vector=[0.2, 0.1, 0.9, 0.7], + query=[0.2, 0.1, 0.9, 0.7], query_filter=models.Filter( must=[models.FieldCondition(key="city", match=models.MatchValue(value="London"))] ), limit=3, - ) + ).points assert [r.id for r in search_result] == [4, 2] @@ -107,16 +107,14 @@ def test_sparse_in_memory_key_filter_returns_results(qdrant: QdrantClient): assert operation_info.operation_id == 0 assert operation_info.status == models.UpdateStatus.COMPLETED - search_result = qdrant.search( + search_result = 
qdrant.query_points( collection_name="test_collection", - query_vector=models.NamedSparseVector( - name="text", - vector=models.SparseVector(indices=[0, 1, 2, 3], values=[0.2, 0.1, 0.9, 0.7]), - ), + using="text", + query=models.SparseVector(indices=[0, 1, 2, 3], values=[0.2, 0.1, 0.9, 0.7]), query_filter=models.Filter( must=[models.FieldCondition(key="city", match=models.MatchValue(value="London"))] ), limit=3, - ) + ).points assert [r.id for r in search_result] == [4, 2] diff --git a/tests/test_local_persistence.py b/tests/test_local_persistence.py index 4a9bb74a..71f3e16c 100644 --- a/tests/test_local_persistence.py +++ b/tests/test_local_persistence.py @@ -150,3 +150,54 @@ def test_local_sparse_persistence(add_dense_to_config): client = qdrant_client.QdrantClient(path=tmpdir) assert client.count(default_collection_name).count == 10 assert client.count("example_2").count == 10 + + +def test_update_persisence(): + collection_name = "update_persisence" + with tempfile.TemporaryDirectory() as tmpdir: + client = qdrant_client.QdrantClient(path=tmpdir) + + if client.collection_exists(collection_name): + client.delete_collection(collection_name) + + client.create_collection( + collection_name, + vectors_config={"dense": rest.VectorParams(size=20, distance=rest.Distance.COSINE)}, + sparse_vectors_config={ + "text": rest.SparseVectorParams(), + }, + metadata={"important": "meta information"}, + ) + + original_collection_info = client.get_collection(collection_name) + + assert original_collection_info.config.params.sparse_vectors["text"].modifier is None + assert original_collection_info.config.metadata == {"important": "meta information"} + + client.update_collection( + collection_name, + sparse_vectors_config={"text": rest.SparseVectorParams(modifier=rest.Modifier.IDF)}, + metadata={"not_important": "missing"}, + ) + updated_collection_info = client.get_collection(collection_name) + assert ( + updated_collection_info.config.params.sparse_vectors["text"].modifier + == rest.Modifier.IDF + ) + assert updated_collection_info.config.metadata == { + "important": "meta information", + "not_important": "missing", + } + client.close() + del client + + client = qdrant_client.QdrantClient(path=tmpdir) + persisted_collection_info = client.get_collection(collection_name) + assert ( + persisted_collection_info.config.params.sparse_vectors["text"].modifier + == rest.Modifier.IDF + ) + assert persisted_collection_info.config.metadata == { + "important": "meta information", + "not_important": "missing", + } diff --git a/tests/test_qdrant_client.py b/tests/test_qdrant_client.py index 31690247..83efda68 100644 --- a/tests/test_qdrant_client.py +++ b/tests/test_qdrant_client.py @@ -240,75 +240,6 @@ def test_client_init(): assert client.init_options["verify"] is ssl_context -@pytest.mark.parametrize("prefer_grpc", [False, True]) -@pytest.mark.parametrize("parallel", [1, 2]) -def test_records_upload(prefer_grpc, parallel): - import warnings - - warnings.simplefilter("ignore", category=DeprecationWarning) - - records = ( - Record( - id=idx, - vector=np.random.rand(DIM).tolist(), - payload=one_random_payload_please(idx), - ) - for idx in range(NUM_VECTORS) - ) - - client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT) - if client.collection_exists(COLLECTION_NAME): - client.delete_collection(collection_name=COLLECTION_NAME, timeout=TIMEOUT) - client.create_collection( - collection_name=COLLECTION_NAME, - vectors_config=VectorParams(size=DIM, distance=Distance.DOT), - timeout=TIMEOUT, - ) - - 
client.upload_records(collection_name=COLLECTION_NAME, records=records, parallel=parallel) - - # By default, Qdrant indexes data updates asynchronously, so client don't need to wait before sending next batch - # Let's give it a second to actually add all points to a collection. - # If you need to change this behaviour - simply enable synchronous processing by enabling `wait=true` - sleep(1) - - collection_info = client.get_collection(collection_name=COLLECTION_NAME) - - assert collection_info.points_count == NUM_VECTORS - - result_count = client.count( - COLLECTION_NAME, - count_filter=Filter( - must=[ - FieldCondition( - key="rand_number", # Condition based on values of `rand_number` field. - range=Range(gte=0.5), # Select only those results where `rand_number` >= 0.5 - ) - ] - ), - ) - - assert result_count.count < 900 - assert result_count.count > 100 - - records = (Record(id=idx, vector=np.random.rand(DIM).tolist()) for idx in range(NUM_VECTORS)) - - client.delete_collection(collection_name=COLLECTION_NAME, timeout=TIMEOUT) - client.create_collection( - collection_name=COLLECTION_NAME, - vectors_config=VectorParams(size=DIM, distance=Distance.DOT), - timeout=TIMEOUT, - ) - - client.upload_records( - collection_name=COLLECTION_NAME, records=records, parallel=parallel, wait=True - ) - - collection_info = client.get_collection(collection_name=COLLECTION_NAME) - - assert collection_info.points_count == NUM_VECTORS - - @pytest.mark.parametrize("prefer_grpc", [False, True]) @pytest.mark.parametrize("parallel", [1, 2]) def test_point_upload(prefer_grpc, parallel): @@ -462,23 +393,25 @@ def test_multiple_vectors(prefer_grpc): query_vector = list(np.random.rand(DIM)) - hits = client.search( + hits = client.query_points( collection_name=COLLECTION_NAME, - query_vector=("image", query_vector), + query=query_vector, + using="image", with_vectors=True, limit=5, # Return 5 closest points - ) + ).points assert len(hits) == 5 assert "image" in hits[0].vector assert "text" in hits[0].vector - hits = client.search( + hits = client.query_points( collection_name=COLLECTION_NAME, - query_vector=("text", query_vector * 2), + using="text", + query=query_vector * 2, with_vectors=True, limit=5, # Return 5 closest points - ) + ).points assert len(hits) == 5 assert "image" in hits[0].vector @@ -489,8 +422,6 @@ def test_multiple_vectors(prefer_grpc): @pytest.mark.parametrize("numpy_upload", [False, True]) @pytest.mark.parametrize("local_mode", [False, True]) def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode): - major, minor, patch, dev = read_version() - vectors_path = create_random_vectors() if numpy_upload: @@ -514,9 +445,8 @@ def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode): timeout=TIMEOUT, ) - if dev or None in (major, minor, patch) or (major, minor, patch) >= (1, 8, 0): - assert client.collection_exists(collection_name=COLLECTION_NAME) - assert not client.collection_exists(collection_name="non_existing_collection") + assert client.collection_exists(collection_name=COLLECTION_NAME) + assert not client.collection_exists(collection_name="non_existing_collection") # Call Qdrant API to retrieve list of existing collections collections = client.get_collections().collections @@ -602,13 +532,13 @@ def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode): query_vector_3: list[float] = list(np.random.rand(DIM)) # and use it as a query - hits = client.search( + hits = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector, + 
query=query_vector, query_filter=None, # Don't use any filters for now, search across all indexed points with_payload=True, # Also return a stored payload for found points limit=5, # Return 5 closest points - ) + ).points assert len(hits) == 5 @@ -619,19 +549,19 @@ def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode): client.create_payload_index(COLLECTION_NAME, "id_str", field_schema=PayloadSchemaType.KEYWORD) # and use it as a query - hits = client.search( + hits = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector, + query=query_vector, query_filter=Filter(must=[FieldCondition(key="id_str", match=MatchValue(value="11"))]), with_payload=True, limit=5, - ) + ).points assert "11" in hits[0].payload["id_str"] - hits_should = client.search( + hits_should = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector, + query=query_vector, query_filter=Filter( should=[ FieldCondition(key="id_str", match=MatchValue(value="10")), @@ -640,11 +570,11 @@ def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode): ), with_payload=True, limit=5, - ) + ).points - hits_match_any = client.search( + hits_match_any = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector, + query=query_vector, query_filter=Filter( must=[ FieldCondition( @@ -655,48 +585,47 @@ def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode): ), with_payload=True, limit=5, - ) + ).points assert hits_should == hits_match_any - if dev or None in (major, minor, patch) or (major, minor, patch) >= (1, 8, 0): - hits_min_should = client.search( - collection_name=COLLECTION_NAME, - query_vector=query_vector, - query_filter=Filter( - min_should=models.MinShould( - conditions=[ - FieldCondition(key="id_str", match=MatchValue(value="11")), - FieldCondition(key="rand_digit", match=MatchAny(any=list(range(10)))), - FieldCondition(key="id", match=MatchAny(any=list(range(100, 150)))), - ], - min_count=2, - ) - ), - with_payload=True, - limit=5, - ) - assert len(hits_min_should) > 0 + hits_min_should = client.query_points( + collection_name=COLLECTION_NAME, + query=query_vector, + query_filter=Filter( + min_should=models.MinShould( + conditions=[ + FieldCondition(key="id_str", match=MatchValue(value="11")), + FieldCondition(key="rand_digit", match=MatchAny(any=list(range(10)))), + FieldCondition(key="id", match=MatchAny(any=list(range(100, 150)))), + ], + min_count=2, + ) + ), + with_payload=True, + limit=5, + ).points + assert len(hits_min_should) > 0 - hits_min_should_empty = client.search( - collection_name=COLLECTION_NAME, - query_vector=query_vector, - query_filter=Filter( - min_should=models.MinShould( - conditions=[ - FieldCondition(key="id_str", match=MatchValue(value="11")), - ], - min_count=2, - ) - ), - with_payload=True, - limit=5, - ) - assert len(hits_min_should_empty) == 0 + hits_min_should_empty = client.query_points( + collection_name=COLLECTION_NAME, + query=query_vector, + query_filter=Filter( + min_should=models.MinShould( + conditions=[ + FieldCondition(key="id_str", match=MatchValue(value="11")), + ], + min_count=2, + ) + ), + with_payload=True, + limit=5, + ).points + assert len(hits_min_should_empty) == 0 # Let's now query same vector with filter condition - hits = client.search( + hits = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector, + query=query_vector, query_filter=Filter( must=[ # These conditions are required for search results FieldCondition( @@ 
-705,23 +634,14 @@ def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode): ) ] ), - append_payload=True, # Also return a stored payload for found points + with_payload=True, limit=5, # Return 5 closest points - ) + ).points print("Filtered search result (`rand_number` >= 0.5):") for hit in hits: print(hit) - if dev or None in (major, minor, patch) or (major, minor, patch) >= (1, 10, 0): - query_response = client.query_points( - collection_name=COLLECTION_NAME, - query=query_vector, - limit=5, - ) - - assert len(query_response.points) == 5 - got_points = client.retrieve( collection_name=COLLECTION_NAME, ids=[1, 2, 3], @@ -765,144 +685,53 @@ def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode): filter_2 = Filter(must=[FieldCondition(key="rand_number", range=Range(gte=0.5))]) filter_3 = Filter(must=[FieldCondition(key="rand_number", range=Range(gte=0.7))]) - search_queries = [ - SearchRequest( - vector=query_vector_1, + query_points_requests = [ + models.QueryRequest( + query=query_vector_1, filter=filter_1, limit=5, with_payload=True, ), - SearchRequest( - vector=query_vector_2, + models.QueryRequest( + query=query_vector_2, filter=filter_2, limit=5, with_payload=True, ), - SearchRequest( - vector=query_vector_3, + models.QueryRequest( + query=query_vector_3, filter=filter_3, limit=5, with_payload=True, ), ] - single_search_result_1 = client.search( + single_query_result_1 = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector_1, + query=query_vector_1, query_filter=filter_1, limit=5, ) - single_search_result_2 = client.search( + single_query_result_2 = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector_2, + query=query_vector_2, query_filter=filter_2, limit=5, ) - single_search_result_3 = client.search( + single_query_result_3 = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector_3, + query=query_vector_3, query_filter=filter_3, limit=5, ) - batch_search_result = client.search_batch( - collection_name=COLLECTION_NAME, requests=search_queries - ) - - assert len(batch_search_result) == 3 - assert batch_search_result[0] == single_search_result_1 - assert batch_search_result[1] == single_search_result_2 - assert batch_search_result[2] == single_search_result_3 - - recommend_queries = [ - RecommendRequest( - positive=[1], - negative=[], - filter=filter_1, - limit=5, - with_payload=True, - ), - RecommendRequest( - positive=[2], - negative=[], - filter=filter_2, - limit=5, - with_payload=True, - ), - RecommendRequest( - positive=[3], - negative=[], - filter=filter_3, - limit=5, - with_payload=True, - ), - ] - reco_result_1 = client.recommend( - collection_name=COLLECTION_NAME, positive=[1], query_filter=filter_1, limit=5 - ) - reco_result_2 = client.recommend( - collection_name=COLLECTION_NAME, positive=[2], query_filter=filter_2, limit=5 - ) - reco_result_3 = client.recommend( - collection_name=COLLECTION_NAME, positive=[3], query_filter=filter_3, limit=5 - ) - - batch_reco_result = client.recommend_batch( - collection_name=COLLECTION_NAME, requests=recommend_queries + batch_query_result = client.query_batch_points( + collection_name=COLLECTION_NAME, requests=query_points_requests ) - assert len(batch_reco_result) == 3 - assert batch_reco_result[0] == reco_result_1 - assert batch_reco_result[1] == reco_result_2 - assert batch_reco_result[2] == reco_result_3 - - if dev or None in (major, minor, patch) or (major, minor, patch) >= (1, 10, 0): - query_points_requests = [ - 
models.QueryRequest( - query=query_vector_1, - filter=filter_1, - limit=5, - with_payload=True, - ), - models.QueryRequest( - query=query_vector_2, - filter=filter_2, - limit=5, - with_payload=True, - ), - models.QueryRequest( - query=query_vector_3, - filter=filter_3, - limit=5, - with_payload=True, - ), - ] - single_query_result_1 = client.query_points( - collection_name=COLLECTION_NAME, - query=query_vector_1, - query_filter=filter_1, - limit=5, - ) - single_query_result_2 = client.query_points( - collection_name=COLLECTION_NAME, - query=query_vector_2, - query_filter=filter_2, - limit=5, - ) - single_query_result_3 = client.query_points( - collection_name=COLLECTION_NAME, - query=query_vector_3, - query_filter=filter_3, - limit=5, - ) - - batch_query_result = client.query_batch_points( - collection_name=COLLECTION_NAME, requests=query_points_requests - ) - - assert len(batch_query_result) == 3 - assert batch_query_result[0] == single_query_result_1 - assert batch_query_result[1] == single_query_result_2 - assert batch_query_result[2] == single_query_result_3 + assert len(batch_query_result) == 3 + assert batch_query_result[0] == single_query_result_1 + assert batch_query_result[1] == single_query_result_2 + assert batch_query_result[2] == single_query_result_3 # ------------------ End of batch queries test ---------------- @@ -986,14 +815,14 @@ def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode): positive = [1, 2, query_vector.tolist()] negative = [] - if None not in (major, minor, patch) and (major, minor, patch) < (1, 6, 0): - positive = [1, 2] - negative = [] - - recommended_points = client.recommend( + recommended_points = client.query_points( collection_name=COLLECTION_NAME, - positive=positive, - negative=negative, + query=models.RecommendQuery( + recommend=models.RecommendInput( + positive=positive, + negative=negative, + ), + ), query_filter=Filter( must=[ # These conditions are required for recommend results FieldCondition( @@ -1005,7 +834,7 @@ def test_qdrant_client_integration(prefer_grpc, numpy_upload, local_mode): limit=5, with_payload=True, with_vectors=False, - ) + ).points assert len(recommended_points) == 5 @@ -1167,12 +996,27 @@ def test_points_crud(prefer_grpc): client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT) if client.collection_exists(COLLECTION_NAME): client.delete_collection(collection_name=COLLECTION_NAME, timeout=TIMEOUT) - client.create_collection( + collection_params = dict( collection_name=COLLECTION_NAME, vectors_config=VectorParams(size=DIM, distance=Distance.DOT), timeout=TIMEOUT, ) + major, minor, patch, dev = read_version() + if not dev and None not in (major, minor, patch) and (major, minor, patch) < (1, 16, 0): + client.create_collection(**collection_params) + else: + collection_metadata = {"ownership": "Bart Simpson's property"} + collection_params["metadata"] = collection_metadata # type: ignore + client.create_collection(**collection_params) + collection_info = client.get_collection(COLLECTION_NAME) + assert collection_info.config.metadata == collection_metadata + + new_metadata = {"due_date": "12.12.2222"} + client.update_collection(COLLECTION_NAME, metadata=new_metadata) + updated_collection_info = client.get_collection(COLLECTION_NAME) + assert updated_collection_info.config.metadata == {**collection_metadata, **new_metadata} + # Create a single point client.upsert( collection_name=COLLECTION_NAME, @@ -1250,9 +1094,9 @@ def test_quantization_config(prefer_grpc): assert quantization_config.scalar.quantile == 
1.0 assert quantization_config.scalar.always_ram is True - _res = client.search( + _res = client.query_points( collection_name=COLLECTION_NAME, - query_vector=np.random.rand(DIM), + query=np.random.rand(DIM), search_params=SearchParams( quantization=QuantizationSearchParams( rescore=True, @@ -1263,14 +1107,6 @@ def test_quantization_config(prefer_grpc): @pytest.mark.parametrize("prefer_grpc", [False, True]) def test_custom_sharding(prefer_grpc): - major, minor, patch, dev = read_version() - if not dev and None not in (major, minor, patch) and (major, minor, patch) < (1, 7, 0): - pytest.skip("Custom sharding is supported since v1.7.0") - - query_api_available = ( - dev or None in (major, minor, patch) or (major, minor, patch) >= (1, 10, 0) - ) - client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT) def init_collection(): @@ -1284,6 +1120,15 @@ def init_collection(): client.create_shard_key(collection_name=COLLECTION_NAME, shard_key=cats_shard_key) client.create_shard_key(collection_name=COLLECTION_NAME, shard_key=dogs_shard_key) + major, minor, patch, dev = read_version() + if major is None or dev or (major, minor, patch) >= (1, 16, 0): + fish_shard_key = "fish" + client.create_shard_key( + collection_name=COLLECTION_NAME, + shard_key=fish_shard_key, + initial_state=models.ReplicaState.ACTIVE, + ) + print("created shard key with replica state") cat_ids = [1, 2, 3] cat_vectors = [np.random.rand(DIM).tolist() for _ in range(len(cat_ids))] @@ -1320,49 +1165,27 @@ def init_collection(): ) query_vector = np.random.rand(DIM) - res = client.search( - collection_name=COLLECTION_NAME, - query_vector=query_vector, - shard_key_selector=cats_shard_key, - ) + res = client.query_points( + collection_name=COLLECTION_NAME, query=query_vector, shard_key_selector=cats_shard_key + ).points assert len(res) == 3 for record in res: assert record.shard_key == cats_shard_key - if query_api_available: - query_res = client.query_points( - collection_name=COLLECTION_NAME, query=query_vector, shard_key_selector=cats_shard_key - ) - assert query_res.points == res - query_vector = np.random.rand(DIM) - res = client.search( + res = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector, + query=query_vector, shard_key_selector=[cats_shard_key, dogs_shard_key], - ) + ).points assert len(res) == 6 - if query_api_available: - query_res = client.query_points( - collection_name=COLLECTION_NAME, - query=query_vector, - shard_key_selector=[cats_shard_key, dogs_shard_key], - ) - assert query_res.points == res query_vector = np.random.rand(DIM) - res = client.search( + res = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector, - ) + query=query_vector, + ).points assert len(res) == 6 - - if query_api_available: - query_res = client.query_points( - collection_name=COLLECTION_NAME, - query=query_vector, - ) - assert res == query_res.points # endregion # region upload_collection @@ -1377,24 +1200,16 @@ def init_collection(): ) query_vector = np.random.rand(DIM) - res = client.search( + res = client.query_points( collection_name=COLLECTION_NAME, - query_vector=query_vector, + query=query_vector, shard_key_selector=cats_shard_key, - ) + ).points assert len(res) == 3 for record in res: assert record.shard_key == cats_shard_key - if query_api_available: - query_res = client.query_points( - collection_name=COLLECTION_NAME, - query=query_vector, - shard_key_selector=cats_shard_key, - ) - assert query_res.points == res - # endregion # region upload_points @@ -1412,31 
@@ -1445,10 +1246,6 @@

 @pytest.mark.parametrize("prefer_grpc", [False, True])
 def test_sparse_vectors(prefer_grpc):
-    major, minor, patch, dev = read_version()
-    if not dev and None not in (major, minor, patch) and (major, minor, patch) < (1, 7, 0):
-        pytest.skip("Sparse vectors are supported since v1.7.0")
-
     client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT)
     if client.collection_exists(COLLECTION_NAME):
         client.delete_collection(collection_name=COLLECTION_NAME)
@@ -1498,17 +1295,12 @@ def test_sparse_vectors(prefer_grpc):
         ],
     )

-    result = client.search(
+    result = client.query_points(
         collection_name=COLLECTION_NAME,
-        query_vector=models.NamedSparseVector(
-            name="text",
-            vector=models.SparseVector(
-                indices=[1, 7],
-                values=[2.0, 1.0],
-            ),
-        ),
+        using="text",
+        query=models.SparseVector(indices=[1, 7], values=[2.0, 1.0]),
         with_vectors=["text"],
-    )
+    ).points

     assert len(result) == 2
     assert result[0].id == 3
@@ -1525,10 +1317,6 @@

 @pytest.mark.parametrize("prefer_grpc", [False, True])
 def test_sparse_vectors_batch(prefer_grpc):
-    major, minor, patch, dev = read_version()
-    if not dev and None not in (major, minor, patch) and (major, minor, patch) < (1, 7, 0):
-        pytest.skip("Sparse vectors are supported since v1.7.0")
-
     client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT)
     if client.collection_exists(COLLECTION_NAME):
         client.delete_collection(collection_name=COLLECTION_NAME)
@@ -1578,24 +1366,22 @@ def test_sparse_vectors_batch(prefer_grpc):
         ],
     )

-    request = models.SearchRequest(
-        vector=models.NamedSparseVector(
-            name="text",
-            vector=models.SparseVector(
-                indices=[1, 7],
-                values=[2.0, 1.0],
-            ),
+    request = models.QueryRequest(
+        query=models.SparseVector(
+            indices=[1, 7],
+            values=[2.0, 1.0],
         ),
+        using="text",
         limit=3,
         with_vector=["text"],
     )

-    results = client.search_batch(
+    results = client.query_batch_points(
         collection_name=COLLECTION_NAME,
         requests=[request],
     )

-    result = results[0]
+    result = results[0].points

     assert len(result) == 2
     assert result[0].id == 3
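Sparse queries lose the `NamedSparseVector` wrapper: the vector name moves into `using=` and the query becomes a bare `SparseVector`. A runnable sketch of the new shape (in-memory client; the collection and vector names are illustrative):

```python
from qdrant_client import QdrantClient, models

client = QdrantClient(":memory:")
client.create_collection(
    collection_name="docs",
    vectors_config={},  # no dense vectors needed for this sketch
    sparse_vectors_config={"text": models.SparseVectorParams()},
)
client.upsert(
    collection_name="docs",
    points=[
        models.PointStruct(
            id=1,
            vector={"text": models.SparseVector(indices=[1, 7], values=[1.0, 2.0])},
        )
    ],
)

# Previously: query_vector=models.NamedSparseVector(name="text", vector=...)
result = client.query_points(
    collection_name="docs",
    query=models.SparseVector(indices=[1, 7], values=[2.0, 1.0]),
    using="text",  # names the sparse vector space to search in
).points
```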
@@ -1792,52 +1578,6 @@ def test_insert_float():
     assert isinstance(point.payload["value"], float)


-def test_locks():
-    client = QdrantClient(timeout=TIMEOUT)
-    if client.collection_exists(COLLECTION_NAME):
-        client.delete_collection(collection_name=COLLECTION_NAME, timeout=TIMEOUT)
-    client.create_collection(
-        collection_name=COLLECTION_NAME,
-        vectors_config=VectorParams(size=DIM, distance=Distance.DOT),
-        timeout=TIMEOUT,
-    )
-
-    client.lock_storage(reason="testing reason")
-
-    try:
-        # Create a single point
-        client.upsert(
-            collection_name=COLLECTION_NAME,
-            points=[
-                PointStruct(
-                    id=123,
-                    payload={"test": "value"},
-                    vector=np.random.rand(DIM).tolist(),
-                )
-            ],
-            wait=True,
-        )
-        assert False, "Should not be able to insert a point when storage is locked"
-    except Exception as e:
-        assert "testing reason" in str(e)
-        pass
-
-    lock_options = client.get_locks()
-    assert lock_options.write is True
-    assert lock_options.error_message == "testing reason"
-
-    client.unlock_storage()
-
-    # should be fine now
-    client.upsert(
-        collection_name=COLLECTION_NAME,
-        points=[
-            PointStruct(id=123, payload={"test": "value"}, vector=np.random.rand(DIM).tolist())
-        ],
-        wait=True,
-    )
-
-
 @pytest.mark.parametrize("prefer_grpc", [False, True])
 def test_empty_vector(prefer_grpc):
     client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT)
@@ -2085,7 +1825,7 @@ def auth_token_provider():
     client.get_collections()
     assert token == "token_2"

-    client.unlock_storage()
+    client.get_collections()
     assert token == "token_3"

     token = ""
@@ -2099,7 +1839,7 @@ def auth_token_provider():
     client.get_collections()
     assert token == "token_1"

-    client.unlock_storage()
+    client.get_collections()
     assert token == "token_2"


@@ -2145,61 +1885,63 @@ def test_read_consistency(prefer_grpc):

     query_vector = fixture_points[0].vector

-    client.search(
+    client.query_points(
         collection_name=COLLECTION_NAME,
-        query_vector=query_vector,
+        query=query_vector,
         limit=5,  # Return 5 closest points
         consistency=models.ReadConsistencyType.MAJORITY,
     )

-    client.search(
+    client.query_points(
         collection_name=COLLECTION_NAME,
-        query_vector=query_vector,
+        query=query_vector,
         limit=5,  # Return 5 closest points
         consistency=models.ReadConsistencyType.MAJORITY,
     )

-    client.search(
+    client.query_points(
         collection_name=COLLECTION_NAME,
-        query_vector=query_vector,
+        query=query_vector,
         limit=5,  # Return 5 closest points
         consistency=2,
     )

-    search_requests = [models.SearchRequest(vector=query_vector, limit=5)]
-    client.search_batch(
+    query_requests = [models.QueryRequest(query=query_vector, limit=5)]
+    client.query_batch_points(
         collection_name=COLLECTION_NAME,
-        requests=search_requests,
+        requests=query_requests,
     )

-    client.search_batch(
+    client.query_batch_points(
         collection_name=COLLECTION_NAME,
-        requests=search_requests,
+        requests=query_requests,
         consistency=models.ReadConsistencyType.MAJORITY,
     )

-    client.search_batch(collection_name=COLLECTION_NAME, requests=search_requests, consistency=2)
+    client.query_batch_points(
+        collection_name=COLLECTION_NAME, requests=query_requests, consistency=2
+    )

-    client.search_groups(
+    client.query_points_groups(
         collection_name=COLLECTION_NAME,
         group_by="word",
-        query_vector=query_vector,
+        query=query_vector,
         limit=5,  # Return 5 closest points
         consistency=models.ReadConsistencyType.MAJORITY,
     )

-    client.search_groups(
+    client.query_points_groups(
         collection_name=COLLECTION_NAME,
         group_by="word",
-        query_vector=query_vector,
+        query=query_vector,
         limit=5,  # Return 5 closest points
         consistency=models.ReadConsistencyType.MAJORITY,
     )

-    client.search_groups(
+    client.query_points_groups(
         collection_name=COLLECTION_NAME,
         group_by="word",
-        query_vector=query_vector,
+        query=query_vector,
         limit=5,  # Return 5 closest points
         consistency=models.ReadConsistencyType.MAJORITY,
     )
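The read-consistency test exercises both spellings of the `consistency` parameter through the Query API. A compact sketch, assuming a multi-replica deployment where the setting is actually meaningful (single-node and local mode have nothing to wait for); the collection name and vector are placeholders:

```python
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")  # assumption: clustered server
query = [0.1, 0.2, 0.3, 0.4]

# Enum form: wait for a majority of replicas to agree.
client.query_points(
    collection_name="demo",
    query=query,
    limit=5,
    consistency=models.ReadConsistencyType.MAJORITY,
)

# Integer form: wait for at least this many replicas.
client.query_points(collection_name="demo", query=query, limit=5, consistency=2)

# The batched variant takes one consistency setting for the whole batch.
client.query_batch_points(
    collection_name="demo",
    requests=[models.QueryRequest(query=query, limit=5)],
    consistency=models.ReadConsistencyType.MAJORITY,
)
```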
@@ -2239,82 +1981,77 @@ def test_create_payload_index(prefer_grpc):
         wait=True,
     )

-    major, minor, patch, dev = read_version()
-    if major is None or dev or (major, minor, patch) >= (1, 11, 0):
-        client.create_payload_index(
-            COLLECTION_NAME, "uuid", models.PayloadSchemaType.UUID, wait=True
-        )
+    client.create_payload_index(COLLECTION_NAME, "uuid", models.PayloadSchemaType.UUID, wait=True)

-        client.create_payload_index(
-            COLLECTION_NAME,
-            "keyword_parametrized",
-            models.KeywordIndexParams(
-                type=models.KeywordIndexType.KEYWORD, is_tenant=False, on_disk=True
-            ),
-            wait=True,
-        )
-        payload_schema = client.get_collection(COLLECTION_NAME).payload_schema
-        assert payload_schema["keyword_parametrized"].params.is_tenant is False
-        assert payload_schema["keyword_parametrized"].params.on_disk is True
-
-        client.create_payload_index(
-            COLLECTION_NAME,
-            "integer_parametrized",
-            models.IntegerIndexParams(
-                type=models.IntegerIndexType.INTEGER,
-                lookup=True,
-                range=False,
-                is_principal=False,
-                on_disk=True,
-            ),
-            wait=True,
-        )
-        if prefer_grpc:
-            rest_client = QdrantClient()
-            _ = rest_client.get_collection(COLLECTION_NAME).payload_schema
-        payload_schema = client.get_collection(COLLECTION_NAME).payload_schema
-        assert payload_schema["integer_parametrized"].params.lookup is True
-        assert payload_schema["integer_parametrized"].params.range is False
-        assert payload_schema["integer_parametrized"].params.is_principal is False
-        assert payload_schema["integer_parametrized"].params.on_disk is True
-
-        client.create_payload_index(
-            COLLECTION_NAME,
-            "float_parametrized",
-            models.FloatIndexParams(
-                type=models.FloatIndexType.FLOAT, is_principal=False, on_disk=True
-            ),
-            wait=True,
-        )
+    client.create_payload_index(
+        COLLECTION_NAME,
+        "keyword_parametrized",
+        models.KeywordIndexParams(
+            type=models.KeywordIndexType.KEYWORD, is_tenant=False, on_disk=True
+        ),
+        wait=True,
+    )
+    payload_schema = client.get_collection(COLLECTION_NAME).payload_schema
+    assert payload_schema["keyword_parametrized"].params.is_tenant is False
+    assert payload_schema["keyword_parametrized"].params.on_disk is True

-        client.create_payload_index(
-            COLLECTION_NAME,
-            "datetime_parametrized",
-            models.DatetimeIndexParams(
-                type=models.DatetimeIndexType.DATETIME, is_principal=False, on_disk=True
-            ),
-            wait=True,
-        )
-        client.create_payload_index(
-            COLLECTION_NAME,
-            "uuid_parametrized",
-            models.UuidIndexParams(type=models.UuidIndexType.UUID, is_tenant=False, on_disk=True),
-            wait=True,
-        )
+    client.create_payload_index(
+        COLLECTION_NAME,
+        "integer_parametrized",
+        models.IntegerIndexParams(
+            type=models.IntegerIndexType.INTEGER,
+            lookup=True,
+            range=False,
+            is_principal=False,
+            on_disk=True,
+        ),
+        wait=True,
+    )
+    if prefer_grpc:
+        rest_client = QdrantClient()
+        _ = rest_client.get_collection(COLLECTION_NAME).payload_schema
+    payload_schema = client.get_collection(COLLECTION_NAME).payload_schema
+    assert payload_schema["integer_parametrized"].params.lookup is True
+    assert payload_schema["integer_parametrized"].params.range is False
+    assert payload_schema["integer_parametrized"].params.is_principal is False
+    assert payload_schema["integer_parametrized"].params.on_disk is True

-    if major is None or dev or (major, minor, patch) >= (1, 11, 1):
-        client.create_payload_index(
-            COLLECTION_NAME,
-            "geo_parametrized",
-            models.GeoIndexParams(type=models.GeoIndexType.GEO),
-            wait=True,
-        )
-        client.create_payload_index(
-            COLLECTION_NAME,
-            "bool_parametrized",
-            models.BoolIndexParams(type=models.BoolIndexType.BOOL),
-            wait=True,
-        )
+    client.create_payload_index(
+        COLLECTION_NAME,
+        "float_parametrized",
+        models.FloatIndexParams(
+            type=models.FloatIndexType.FLOAT, is_principal=False, on_disk=True
+        ),
+        wait=True,
+    )
+
+    client.create_payload_index(
+        COLLECTION_NAME,
+        "datetime_parametrized",
+        models.DatetimeIndexParams(
+            type=models.DatetimeIndexType.DATETIME, is_principal=False, on_disk=True
+        ),
+        wait=True,
+    )
+    client.create_payload_index(
+        COLLECTION_NAME,
+        "uuid_parametrized",
+        models.UuidIndexParams(type=models.UuidIndexType.UUID, is_tenant=False, on_disk=True),
+        wait=True,
+    )
+
+    client.create_payload_index(
+        COLLECTION_NAME,
+        "geo_parametrized",
+        models.GeoIndexParams(type=models.GeoIndexType.GEO),
+        wait=True,
+    )
+    client.create_payload_index(
+        COLLECTION_NAME,
+        "bool_parametrized",
+        models.BoolIndexParams(type=models.BoolIndexType.BOOL),
+        wait=True,
+    )


 @pytest.mark.parametrize("prefer_grpc", (False, True))
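With the 1.11 version gates gone, both flavors of `create_payload_index` run unconditionally. A small sketch of the two shapes the test covers — a plain schema type and a parametrized schema; the field names and the in-memory client are illustrative only:

```python
from qdrant_client import QdrantClient, models

client = QdrantClient(":memory:")
client.create_collection(
    collection_name="demo",
    vectors_config=models.VectorParams(size=4, distance=models.Distance.DOT),
)

# Plain schema type, as with the "uuid" field above.
client.create_payload_index("demo", "user_id", models.PayloadSchemaType.UUID, wait=True)

# Parametrized schema, as with the "keyword_parametrized" field above.
client.create_payload_index(
    "demo",
    "tenant",
    models.KeywordIndexParams(type=models.KeywordIndexType.KEYWORD, is_tenant=True),
    wait=True,
)
```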
@@ -2525,12 +2262,3 @@ def run_upload_points():

     with pytest.raises(exception_class):
         client.upload_points(COLLECTION_NAME, points=points, wait=True, max_retries=1)
-
-
-if __name__ == "__main__":
-    test_qdrant_client_integration()
-    test_points_crud()
-    test_has_id_condition()
-    test_insert_float()
-    test_create_payload_index()
-    test_strict_mode()
diff --git a/tests/type_stub.py b/tests/type_stub.py
index 64e20b69..8a2e5cfd 100644
--- a/tests/type_stub.py
+++ b/tests/type_stub.py
@@ -1,5 +1,3 @@
-import numpy as np
-
 from qdrant_client import QdrantClient
 from qdrant_client import models as rest_models
 from qdrant_client.conversions import common_types as types
@@ -27,76 +25,9 @@
 qdrant_client.get_collection("collection")
 qdrant_client.collection_exists("collection")
 qdrant_client.get_collections()
-qdrant_client.get_locks()
 qdrant_client.list_full_snapshots()
 qdrant_client.list_snapshots("collection")
-qdrant_client.lock_storage("reason")
 qdrant_client.overwrite_payload("collection", {}, [])
-qdrant_client.recommend(
-    "collection",
-    [],
-    [],
-    rest_models.Filter(),
-    rest_models.SearchParams(),
-    10,
-    0,
-    True,
-    True,
-    1.0,
-    "using",
-    rest_models.LookupLocation(collection=""),
-    rest_models.RecommendStrategy.AVERAGE_VECTOR,
-    1,
-)
-qdrant_client.recommend_batch(
-    "collection",
-    [
-        rest_models.RecommendRequest(
-            positive=[],
-            negative=[],
-            filter=None,
-            params=None,
-            limit=10,
-            offset=0,
-            with_payload=True,
-            with_vector=True,
-            score_threshold=0.5,
-            using=None,
-            lookup_from=None,
-        )
-    ],
-)
-qdrant_client.discover(
-    "collection",
-    None,
-    [],
-    rest_models.Filter(),
-    rest_models.SearchParams(),
-    10,
-    0,
-    True,
-    True,
-    "using",
-    rest_models.LookupLocation(collection=""),
-    1,
-)
-qdrant_client.discover_batch(
-    "collection",
-    [
-        rest_models.DiscoverRequest(
-            target=None,
-            context=[],
-            filter=rest_models.Filter(),
-            params=rest_models.SearchParams(),
-            limit=10,
-            offset=0,
-            with_vector=True,
-            with_payload=True,
-            using="using",
-            lookup_from=rest_models.LookupLocation(collection=""),
-        ),
-    ],
-)
 qdrant_client.recover_snapshot("collection", "location", rest_models.SnapshotPriority.REPLICA)
 qdrant_client.create_collection(
     "collection",
@@ -118,8 +49,9 @@
     rest_models.OptimizersConfigDiff(),
     rest_models.WalConfigDiff(),
     rest_models.ScalarQuantization(scalar=ScalarQuantizationConfig(type=ScalarType.INT8)),
-    None,
     5,
+    rest_models.StrictModeConfig(),
+    {},
 )
 qdrant_client.recreate_collection(
     "collection",
@@ -142,7 +74,7 @@
     rest_models.WalConfigDiff(),
     rest_models.ScalarQuantization(scalar=ScalarQuantizationConfig(type=ScalarType.INT8)),
     None,
-    5,
+    rest_models.StrictModeConfig(),
 )
 qdrant_client.recreate_collection(
     "collection",
@@ -167,21 +99,11 @@
         product=ProductQuantizationConfig(compression=CompressionRatio.X32)
     ),
     None,
-    5,
+    rest_models.StrictModeConfig(),
 )
 qdrant_client.retrieve("collection", [])
 qdrant_client.scroll("collection")
-qdrant_client.search_batch(
-    "collection",
-    [
-        rest_models.SearchRequest(
-            vector=[1.0, 0.0, 3.0],
-            limit=10,
-        )
-    ],
-)
 qdrant_client.set_payload("collection", {}, [], key=None, wait=True)
-qdrant_client.unlock_storage()
 qdrant_client.update_collection(
     "collection",
     rest_models.OptimizersConfigDiff(
@@ -202,54 +124,11 @@
         )
     ]
 )
-qdrant_client.upload_records("collection", [])
 qdrant_client.upload_points("collection", [])
 qdrant_client.upsert("collection", [])
-
-qdrant_client.search("collection", [123], with_payload=["str", "another one", "and another one"])
-# pyright currently is not happy with np.array and treating it as a "partially unknown type"
-qdrant_client.search(
-    "collection",
-    np.array([123]),  # type: ignore
-    with_payload=["str", "another one", "and another one"],
-)
 qdrant_client.upload_collection("collection", [[123]])
 qdrant_client.update_vectors("collection", [rest_models.PointVectors(id=1, vector=[123])], False)
 qdrant_client.delete_vectors("collection", [], [123, 32, 44])
-qdrant_client.search_groups(
-    "collection",
-    [123],
-    "rand_field",
-    rest_models.Filter(
-        must=[rest_models.FieldCondition(key="field", match=rest_models.MatchValue(value="123"))]
-    ),
-    rest_models.SearchParams(hnsw_ef=182),
-    2,
-    3,
-    True,
-    True,
-    0.2,
-)
-qdrant_client.recommend_groups(
-    "collection",
-    "rand_field",
-    [14],
-    [],
-    rest_models.Filter(
-        must=[rest_models.FieldCondition(key="field", match=rest_models.MatchValue(value="123"))]
-    ),
-    rest_models.SearchParams(hnsw_ef=182),
-    2,
-    3,
-    3.0,
-    True,
-    True,
-    "using",
-    rest_models.LookupLocation(collection="start"),
-    None,
-)
-
-
 qdrant_client.batch_update_points(
     collection_name="batchcollection",
     update_operations=[
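The `type_stub.py` adjustments above track two newer `create_collection` parameters: `strict_mode_config` and, per the 1.16 gate earlier in this patch, collection-level `metadata`. A keyword-argument sketch under those assumptions (the field values are invented; metadata requires a new enough server):

```python
from qdrant_client import QdrantClient, models

client = QdrantClient(url="http://localhost:6333")  # assumption: server >= 1.16 for metadata
client.create_collection(
    collection_name="demo",
    vectors_config=models.VectorParams(size=4, distance=models.Distance.DOT),
    strict_mode_config=models.StrictModeConfig(enabled=True, max_query_limit=100),
    metadata={"owner": "team-search"},
)

# Later metadata updates are merged into the stored mapping,
# as the assertions in test_points_crud above demonstrate.
client.update_collection(collection_name="demo", metadata={"stage": "staging"})
```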
"collection", @@ -167,21 +99,11 @@ product=ProductQuantizationConfig(compression=CompressionRatio.X32) ), None, - 5, + rest_models.StrictModeConfig(), ) qdrant_client.retrieve("collection", []) qdrant_client.scroll("collection") -qdrant_client.search_batch( - "collection", - [ - rest_models.SearchRequest( - vector=[1.0, 0.0, 3.0], - limit=10, - ) - ], -) qdrant_client.set_payload("collection", {}, [], key=None, wait=True) -qdrant_client.unlock_storage() qdrant_client.update_collection( "collection", rest_models.OptimizersConfigDiff( @@ -202,54 +124,11 @@ ) ] ) -qdrant_client.upload_records("collection", []) qdrant_client.upload_points("collection", []) qdrant_client.upsert("collection", []) - -qdrant_client.search("collection", [123], with_payload=["str", "another one", "and another one"]) -# pyright currently is not happy with np.array and treating it as a "partially unknown type" -qdrant_client.search( - "collection", - np.array([123]), # type: ignore - with_payload=["str", "another one", "and another one"], -) qdrant_client.upload_collection("collection", [[123]]) qdrant_client.update_vectors("collection", [rest_models.PointVectors(id=1, vector=[123])], False) qdrant_client.delete_vectors("collection", [], [123, 32, 44]) -qdrant_client.search_groups( - "collection", - [123], - "rand_field", - rest_models.Filter( - must=[rest_models.FieldCondition(key="field", match=rest_models.MatchValue(value="123"))] - ), - rest_models.SearchParams(hnsw_ef=182), - 2, - 3, - True, - True, - 0.2, -) -qdrant_client.recommend_groups( - "collection", - "rand_field", - [14], - [], - rest_models.Filter( - must=[rest_models.FieldCondition(key="field", match=rest_models.MatchValue(value="123"))] - ), - rest_models.SearchParams(hnsw_ef=182), - 2, - 3, - 3.0, - True, - True, - "using", - rest_models.LookupLocation(collection="start"), - None, -) - - qdrant_client.batch_update_points( collection_name="batchcollection", update_operations=[ diff --git a/tools/async_client_generator/base_client_generator.py b/tools/async_client_generator/base_client_generator.py index fb8e75b9..270ff97c 100644 --- a/tools/async_client_generator/base_client_generator.py +++ b/tools/async_client_generator/base_client_generator.py @@ -30,7 +30,7 @@ def __init__( # Parse the code into an AST base_client_generator = BaseClientGenerator( - keep_sync=["__init__", "upload_records", "upload_collection", "upload_points", "migrate"], + keep_sync=["__init__", "upload_collection", "upload_points", "migrate"], class_replace_map={"QdrantBase": "AsyncQdrantBase"}, constant_replace_map={"QdrantBase": "AsyncQdrantBase"}, ) diff --git a/tools/generate_grpc_client.sh b/tools/generate_grpc_client.sh index ebee7295..a9a97c2f 100755 --- a/tools/generate_grpc_client.sh +++ b/tools/generate_grpc_client.sh @@ -26,8 +26,8 @@ fi source "$VENV_DIR/bin/activate" pip install --upgrade pip -pip install "grpcio==1.48.2" -pip install "grpcio-tools==1.48.2" +pip install "grpcio==1.62.0" # as of 1.63.0 grpc complaining that our minimal supported version 1.48 is too old +pip install "grpcio-tools==1.62.0" # as of 1.63.0 grpc complaining that our minimal supported version 1.48 is too old pip install "mypy-protobuf==3.3.0" # ^3.3.0 cd "$QDRANT_PATH"