From 18fb916e3e5c20daca0f13f3a09fd9816f7e24d9 Mon Sep 17 00:00:00 2001
From: Eugene Yurtsev
Date: Sun, 1 Oct 2023 13:10:24 -0400
Subject: [PATCH] Release 0.0.2 (#6)

Release 0.0.2
---
 poetry.lock                       | 35 ++++++-------
 pyproject.toml                    |  4 +-
 tests/unit_tests/test_encoders.py | 87 ++++++++++++++-----------
 3 files changed, 56 insertions(+), 70 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index 0eb502ea..31e68d1c 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1630,23 +1630,25 @@ version = "0.0.305"
 description = "Building applications with LLMs through composability"
 optional = false
 python-versions = ">=3.8.1,<4.0"
-files = []
-develop = false
+files = [
+    {file = "langchain-0.0.305-py3-none-any.whl", hash = "sha256:cd96afe7a7bf567b8ab437bc2826c3693cf0fdfd24935f17c452b58542dba75d"},
+    {file = "langchain-0.0.305.tar.gz", hash = "sha256:d703ce82b6939f3f79188bf5a0bd637d32fc4b2bc1b568adad30a0282ad65bbf"},
+]
 
 [package.dependencies]
-aiohttp = "^3.8.3"
+aiohttp = ">=3.8.3,<4.0.0"
 anyio = "<4.0"
-async-timeout = {version = "^4.0.0", markers = "python_version < \"3.11\""}
-dataclasses-json = ">= 0.5.7, < 0.7"
-jsonpatch = "^1.33"
-langsmith = "~0.0.38"
-numexpr = "^2.8.4"
-numpy = "^1"
+async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""}
+dataclasses-json = ">=0.5.7,<0.7"
+jsonpatch = ">=1.33,<2.0"
+langsmith = ">=0.0.38,<0.1.0"
+numexpr = ">=2.8.4,<3.0.0"
+numpy = ">=1,<2"
 pydantic = ">=1,<3"
 PyYAML = ">=5.3"
-requests = "^2"
+requests = ">=2,<3"
 SQLAlchemy = ">=1.4,<3"
-tenacity = "^8.1.0"
+tenacity = ">=8.1.0,<9.0.0"
 
 [package.extras]
 all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "amadeus (>=8.1.0)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.9,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=4,<5)", "deeplake (>=3.6.8,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "libdeeplake (>=0.0.60,<0.0.61)", "librosa (>=0.10.0.post2,<0.11.0)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=1.2.4,<2.0.0)", "momento (>=1.5.0,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<4)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "python-arango (>=7.5.9,<8.0.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.6.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
@@ -1655,20 +1657,13 @@ clarifai = ["clarifai (>=9.1.0)"]
 cohere = ["cohere (>=4,<5)"]
 docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"]
 embeddings = ["sentence-transformers (>=2,<3)"]
-extended-testing = ["amazon-textract-caller (<2)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "dashvector (>=1.0.1,<2.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "openai (>=0,<1)", "openai (>=0,<1)", "openapi-schema-pydantic (>=1.2,<2.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
+extended-testing = ["amazon-textract-caller (<2)", "anthropic (>=0.3.11,<0.4.0)", "arxiv (>=1.4,<2.0)", "assemblyai (>=0.17.0,<0.18.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.1.0,<0.2.0)", "chardet (>=5.1.0,<6.0.0)", "dashvector (>=1.0.1,<2.0.0)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "feedparser (>=6.0.10,<7.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gitpython (>=3.1.32,<4.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "markdownify (>=0.11.6,<0.12.0)", "motor (>=3.3.1,<4.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "newspaper3k (>=0.2.8,<0.3.0)", "openai (>=0,<1)", "openapi-schema-pydantic (>=1.2,<2.0)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "sqlite-vss (>=0.1.2,<0.2.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "timescale-vector (>=0.0.1,<0.0.2)", "tqdm (>=4.48.0)", "xata (>=1.0.0a7,<2.0.0)", "xmltodict (>=0.13.0,<0.14.0)"]
 javascript = ["esprima (>=4.0.1,<5.0.0)"]
 llms = ["clarifai (>=9.1.0)", "cohere (>=4,<5)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
 openai = ["openai (>=0,<1)", "tiktoken (>=0.3.2,<0.6.0)"]
 qdrant = ["qdrant-client (>=1.3.1,<2.0.0)"]
 text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
 
-[package.source]
-type = "git"
-url = "https://github.com/langchain-ai/langchain"
-reference = "HEAD"
-resolved_reference = "8b4cb4eb60e3935eea895aa955d68ca0afce788c"
-subdirectory = "libs/langchain"
-
 [[package]]
 name = "langsmith"
 version = "0.0.41"
@@ -3799,4 +3794,4 @@ server = ["fastapi", "sse-starlette"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">3.8.1,<4"
-content-hash = "a2e85c00c6ce7a2fd5e0aa4322135b9d8f4a8bc6bc65c1200f913f769fc81d94"
+content-hash = "e05ba3003172eadf9aad60a8d5a59f288ebe18f4f513392cd765b9210097d10c"

diff --git a/pyproject.toml b/pyproject.toml
index 67ab0060..82a1bef2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langserve"
-version = "0.0.1"
+version = "0.0.2"
 description = ""
 readme = "README.md"
 authors = ["LangChain"]
@@ -14,7 +14,7 @@ fastapi = {version = ">0.90.1", optional = true}
 sse-starlette = {version = "^1.3.0", optional = true}
 httpx-sse = {version = "^0.3.1", optional = true}
 pydantic = "^1"
-langchain = { git = "https://github.com/langchain-ai/langchain", subdirectory = "libs/langchain" }
+langchain = ">=0.0.305"
 
 [tool.poetry.group.dev.dependencies]
 jupyterlab = "^3.6.1"

diff --git a/tests/unit_tests/test_encoders.py b/tests/unit_tests/test_encoders.py
index 50b041ba..6cd298c7 100644
--- a/tests/unit_tests/test_encoders.py
+++ b/tests/unit_tests/test_encoders.py
@@ -4,7 +4,6 @@
 import pytest
 from langchain.schema.messages import (
     HumanMessage,
-    HumanMessageChunk,
     SystemMessage,
 )
 
@@ -33,7 +32,6 @@
                     "additional_kwargs": {},
                     "type": "human",
                     "example": False,
-                    "is_chunk": False,
                 }
             ]
         },
@@ -46,7 +44,6 @@
             "content": "Hello",
             "example": False,
             "type": "human",
-            "is_chunk": False,
         },
     ),
     # Test with a list containing mixed elements
@@ -58,60 +55,55 @@
                 "content": "Hello",
                 "example": False,
                 "type": "human",
-                "is_chunk": False,
             },
             {
                 "additional_kwargs": {},
                 "content": "Hi",
                 "type": "system",
-                "is_chunk": False,
             },
             42,
             "world",
         ],
     ),
-    # Attention: This test is not correct right now
-    # Test with full and chunk messages
-    (
-        [HumanMessage(content="Hello"), HumanMessageChunk(content="Hi")],
-        [
-            {
-                "additional_kwargs": {},
-                "content": "Hello",
-                "example": False,
-                "type": "human",
-                "is_chunk": False,
-            },
-            {
-                "additional_kwargs": {},
-                "content": "Hi",
-                "example": False,
-                "type": "human",
-                "is_chunk": True,
-            },
-        ],
-    ),
-    # Attention: This test is not correct right now
-    # Test with full and chunk messages
-    (
-        [HumanMessageChunk(content="Hello"), HumanMessage(content="Hi")],
-        [
-            {
-                "additional_kwargs": {},
-                "content": "Hello",
-                "example": False,
-                "type": "human",
-                "is_chunk": True,
-            },
-            {
-                "additional_kwargs": {},
-                "content": "Hi",
-                "example": False,
-                "type": "human",
-                "is_chunk": False,
-            },
-        ],
-    ),
+    # Uncomment when langchain 0.0.306 is released
+    # # Attention: This test is not correct right now
+    # # Test with full and chunk messages
+    # (
+    #     [HumanMessage(content="Hello"), HumanMessageChunk(content="Hi")],
+    #     [
+    #         {
+    #             "additional_kwargs": {},
+    #             "content": "Hello",
+    #             "example": False,
+    #             "type": "human",
+    #         },
+    #         {
+    #             "additional_kwargs": {},
+    #             "content": "Hi",
+    #             "example": False,
+    #             "type": "human",
+    #         },
+    #     ],
+    # ),
+    # # Attention: This test is not correct right now
+    # # Test with full and chunk messages
+    # (
+    #     [HumanMessageChunk(content="Hello"), HumanMessage(content="Hi")],
+    #     [
+    #         {
+    #             "additional_kwargs": {},
+    #             "content": "Hello",
+    #             "example": False,
+    #             "type": "human",
+    #         },
+    #         {
+    #             "additional_kwargs": {},
+    #             "content": "Hi",
+    #             "example": False,
+    #             "type": "human",
+    #         },
+    #     ],
+    # ),
     # Test with a dictionary containing mixed elements
     (
         {
@@ -125,7 +117,6 @@
                 "content": "Greetings",
                 "example": False,
                 "type": "human",
-                "is_chunk": False,
             },
             "numbers": [1, 2, 3],
             "boom": "Hello, world!",