-
Notifications
You must be signed in to change notification settings - Fork 2k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
feat: SentenceTransformersDocumentEmbedder and SentenceTransformersTe…
…xtEmbedder can accept and pass any arguments to SentenceTransformer.encode (#8806) * feat: SentenceTransformersDocumentEmbedder and SentenceTransformersTextEmbedder can accept and pass any arguments to SentenceTransformer.encode * refactor: encode_kwargs parameter of SentenceTransformersDocumentEmbedder and SentenceTransformersTextEmbedder made to be the last positional parameter for backward compatibility reasons * docs: added explanation for encode_kwargs in SentenceTransformersTextEmbedder and SentenceTransformersDocumentEmbedder * test: added tests for encode_kwargs in SentenceTransformersTextEmbedder and SentenceTransformersDocumentEmbedder * doc: removed empty lines from docstrings of SentenceTransformersTextEmbedder and SentenceTransformersDocumentEmbedder * refactor: encode_kwargs parameter of SentenceTransformersDocumentEmbedder and SentenceTransformersTextEmbedder made to be the last positional parameter for backward compatibility (part II.)
- Loading branch information
Showing
5 changed files
with
59 additions
and
3 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
6 changes: 6 additions & 0 deletions
6
releasenotes/notes/add-encode-kwargs-sentence-transformers-f4d885f6c5b1706f.yaml
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,6 @@ | ||
--- | ||
enhancements: | ||
- | | ||
Enhanced `SentenceTransformersDocumentEmbedder` and `SentenceTransformersTextEmbedder` to accept | ||
an additional `encode_kwargs` parameter, which is passed directly to the underlying `SentenceTransformer.encode` method | ||
for greater flexibility in embedding customization. |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,9 +1,9 @@ | ||
# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]> | ||
# | ||
# SPDX-License-Identifier: Apache-2.0 | ||
import random | ||
from unittest.mock import MagicMock, patch | ||
|
||
import random | ||
import pytest | ||
import torch | ||
|
||
|
@@ -79,6 +79,7 @@ def test_to_dict(self): | |
"truncate_dim": None, | ||
"model_kwargs": None, | ||
"tokenizer_kwargs": None, | ||
"encode_kwargs": None, | ||
"config_kwargs": None, | ||
"precision": "float32", | ||
}, | ||
|
@@ -102,6 +103,7 @@ def test_to_dict_with_custom_init_parameters(self): | |
tokenizer_kwargs={"model_max_length": 512}, | ||
config_kwargs={"use_memory_efficient_attention": True}, | ||
precision="int8", | ||
encode_kwargs={"task": "clustering"}, | ||
) | ||
data = component.to_dict() | ||
|
||
|
@@ -124,6 +126,7 @@ def test_to_dict_with_custom_init_parameters(self): | |
"tokenizer_kwargs": {"model_max_length": 512}, | ||
"config_kwargs": {"use_memory_efficient_attention": True}, | ||
"precision": "int8", | ||
"encode_kwargs": {"task": "clustering"}, | ||
}, | ||
} | ||
|
||
|
@@ -316,6 +319,20 @@ def test_embed_metadata(self): | |
precision="float32", | ||
) | ||
|
||
def test_embed_encode_kwargs(self): | ||
embedder = SentenceTransformersDocumentEmbedder(model="model", encode_kwargs={"task": "retrieval.passage"}) | ||
embedder.embedding_backend = MagicMock() | ||
documents = [Document(content=f"document number {i}") for i in range(5)] | ||
embedder.run(documents=documents) | ||
embedder.embedding_backend.embed.assert_called_once_with( | ||
["document number 0", "document number 1", "document number 2", "document number 3", "document number 4"], | ||
batch_size=32, | ||
show_progress_bar=True, | ||
normalize_embeddings=False, | ||
precision="float32", | ||
task="retrieval.passage", | ||
) | ||
|
||
def test_prefix_suffix(self): | ||
embedder = SentenceTransformersDocumentEmbedder( | ||
model="model", | ||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,11 +1,11 @@ | ||
# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]> | ||
# | ||
# SPDX-License-Identifier: Apache-2.0 | ||
import random | ||
from unittest.mock import MagicMock, patch | ||
|
||
import torch | ||
import random | ||
import pytest | ||
import torch | ||
|
||
from haystack.components.embedders.sentence_transformers_text_embedder import SentenceTransformersTextEmbedder | ||
from haystack.utils import ComponentDevice, Secret | ||
|
@@ -70,6 +70,7 @@ def test_to_dict(self): | |
"truncate_dim": None, | ||
"model_kwargs": None, | ||
"tokenizer_kwargs": None, | ||
"encode_kwargs": None, | ||
"config_kwargs": None, | ||
"precision": "float32", | ||
}, | ||
|
@@ -91,6 +92,7 @@ def test_to_dict_with_custom_init_parameters(self): | |
tokenizer_kwargs={"model_max_length": 512}, | ||
config_kwargs={"use_memory_efficient_attention": False}, | ||
precision="int8", | ||
encode_kwargs={"task": "clustering"}, | ||
) | ||
data = component.to_dict() | ||
assert data == { | ||
|
@@ -110,6 +112,7 @@ def test_to_dict_with_custom_init_parameters(self): | |
"tokenizer_kwargs": {"model_max_length": 512}, | ||
"config_kwargs": {"use_memory_efficient_attention": False}, | ||
"precision": "int8", | ||
"encode_kwargs": {"task": "clustering"}, | ||
}, | ||
} | ||
|
||
|
@@ -297,3 +300,17 @@ def test_run_quantization(self): | |
|
||
assert len(embedding_def) == 768 | ||
assert all(isinstance(el, int) for el in embedding_def) | ||
|
||
def test_embed_encode_kwargs(self): | ||
embedder = SentenceTransformersTextEmbedder(model="model", encode_kwargs={"task": "retrieval.query"}) | ||
embedder.embedding_backend = MagicMock() | ||
text = "a nice text to embed" | ||
embedder.run(text=text) | ||
embedder.embedding_backend.embed.assert_called_once_with( | ||
[text], | ||
batch_size=32, | ||
show_progress_bar=True, | ||
normalize_embeddings=False, | ||
precision="float32", | ||
task="retrieval.query", | ||
) |