Commit dc04672
examples: update clients (#768)
eyurtsev authored Sep 14, 2024
1 parent 42b61a6 commit dc04672
Showing 4 changed files with 39 additions and 203 deletions.
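Across the notebooks shown here the changes follow two patterns: each cell's multi-line "source" array is collapsed into a single string, and imports move off the deprecated langchain.schema paths onto their langchain_core equivalents. A minimal before/after sketch of the import migration, taken from the hunks below:

# Before (deprecated paths removed in this commit):
# from langchain.prompts.chat import ChatPromptTemplate
# from langchain.schema.runnable import RunnablePassthrough
# from langchain.schema.runnable.config import RunnableConfig

# After (langchain_core equivalents added in this commit):
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig, RunnablePassthrough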
62 changes: 9 additions & 53 deletions examples/configurable_chain/client.ipynb
@@ -23,14 +23,7 @@
"tags": []
},
"outputs": [],
"source": [
"import requests\n",
"\n",
"inputs = {\"input\": {\"topic\": \"sports\"}}\n",
"response = requests.post(\"http://localhost:8000/configurable_temp/invoke\", json=inputs)\n",
"\n",
"response.json()"
]
"source": ["import requests\n\ninputs = {\"input\": {\"topic\": \"sports\"}}\nresponse = requests.post(\"http://localhost:8000/configurable_temp/invoke\", json=inputs)\n\nresponse.json()"]
},
{
"cell_type": "markdown",
@@ -46,11 +39,7 @@
"tags": []
},
"outputs": [],
"source": [
"from langserve import RemoteRunnable\n",
"\n",
"remote_runnable = RemoteRunnable(\"http://localhost:8000/configurable_temp\")"
]
"source": ["from langserve import RemoteRunnable\n\nremote_runnable = RemoteRunnable(\"http://localhost:8000/configurable_temp\")"]
},
{
"cell_type": "markdown",
@@ -66,9 +55,7 @@
"tags": []
},
"outputs": [],
"source": [
"response = await remote_runnable.ainvoke({\"topic\": \"sports\"})"
]
"source": ["response = await remote_runnable.ainvoke({\"topic\": \"sports\"})"]
},
{
"cell_type": "markdown",
@@ -84,11 +71,7 @@
"tags": []
},
"outputs": [],
"source": [
"from langchain.schema.runnable.config import RunnableConfig\n",
"\n",
"remote_runnable.batch([{\"topic\": \"sports\"}, {\"topic\": \"cars\"}])"
]
"source": ["from langchain_core.runnables import RunnableConfig\n\nremote_runnable.batch([{\"topic\": \"sports\"}, {\"topic\": \"cars\"}])"]
},
{
"cell_type": "markdown",
@@ -104,10 +87,7 @@
"tags": []
},
"outputs": [],
"source": [
"async for chunk in remote_runnable.astream({\"topic\": \"bears, but a bit verbose\"}):\n",
" print(chunk, end=\"\", flush=True)"
]
"source": ["async for chunk in remote_runnable.astream({\"topic\": \"bears, but a bit verbose\"}):\n print(chunk, end=\"\", flush=True)"]
},
{
"cell_type": "markdown",
@@ -157,14 +137,7 @@
"tags": []
},
"outputs": [],
"source": [
"await remote_runnable.ainvoke(\n",
" {\"topic\": \"sports\"},\n",
" config={\n",
" \"configurable\": {\"prompt\": \"how to say {topic} in french\", \"llm\": \"low_temp\"}\n",
" },\n",
")"
]
"source": ["await remote_runnable.ainvoke(\n {\"topic\": \"sports\"},\n config={\n \"configurable\": {\"prompt\": \"how to say {topic} in french\", \"llm\": \"low_temp\"}\n },\n)"]
},
{
"cell_type": "markdown",
@@ -221,13 +194,7 @@
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The model will fail with an auth error\n",
"unauthenticated_response = requests.post(\n",
" \"http://localhost:8000/auth_from_header/invoke\", json={\"input\": \"hello\"}\n",
")\n",
"unauthenticated_response.json()"
]
"source": ["# The model will fail with an auth error\nunauthenticated_response = requests.post(\n \"http://localhost:8000/auth_from_header/invoke\", json={\"input\": \"hello\"}\n)\nunauthenticated_response.json()"]
},
{
"cell_type": "markdown",
@@ -244,25 +211,14 @@
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# The model will succeed as long as the above shell script is run previously\n",
"import os\n",
"\n",
"test_key = os.environ[\"TEST_API_KEY\"]\n",
"authenticated_response = requests.post(\n",
" \"http://localhost:8000/auth_from_header/invoke\",\n",
" json={\"input\": \"hello\"},\n",
" headers={\"x-api-key\": test_key},\n",
")\n",
"authenticated_response.json()"
]
"source": ["# The model will succeed as long as the above shell script is run previously\nimport os\n\ntest_key = os.environ[\"TEST_API_KEY\"]\nauthenticated_response = requests.post(\n \"http://localhost:8000/auth_from_header/invoke\",\n json={\"input\": \"hello\"},\n headers={\"x-api-key\": test_key},\n)\nauthenticated_response.json()"]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
"source": [""]
}
],
"metadata": {
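For reference, the updated configurable_chain client cells consolidate into roughly the following script. This is a sketch using the sync variants of the calls so it runs outside a notebook; it assumes the example's LangServe server is running on localhost:8000 and that TEST_API_KEY is exported, as in the notebook.

import os

import requests
from langserve import RemoteRunnable

# Plain HTTP call against the configurable_temp route
inputs = {"input": {"topic": "sports"}}
response = requests.post("http://localhost:8000/configurable_temp/invoke", json=inputs)
print(response.json())

# The same route through the RemoteRunnable client
remote_runnable = RemoteRunnable("http://localhost:8000/configurable_temp")
print(remote_runnable.batch([{"topic": "sports"}, {"topic": "cars"}]))

# Per-request configuration of the prompt and model
print(
    remote_runnable.invoke(
        {"topic": "sports"},
        config={
            "configurable": {"prompt": "how to say {topic} in french", "llm": "low_temp"}
        },
    )
)

# Authenticated route: x-api-key must match the server's TEST_API_KEY
test_key = os.environ["TEST_API_KEY"]
authenticated_response = requests.post(
    "http://localhost:8000/auth_from_header/invoke",
    json={"input": "hello"},
    headers={"x-api-key": test_key},
)
print(authenticated_response.json())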
94 changes: 13 additions & 81 deletions examples/llm/client.ipynb
@@ -16,9 +16,7 @@
"tags": []
},
"outputs": [],
"source": [
"from langchain.prompts.chat import ChatPromptTemplate"
]
"source": ["from langchain_core.prompts import ChatPromptTemplate"]
},
{
"cell_type": "code",
@@ -27,12 +25,7 @@
"tags": []
},
"outputs": [],
"source": [
"from langserve import RemoteRunnable\n",
"\n",
"openai_llm = RemoteRunnable(\"http://localhost:8000/openai/\")\n",
"anthropic = RemoteRunnable(\"http://localhost:8000/anthropic/\")"
]
"source": ["from langserve import RemoteRunnable\n\nopenai_llm = RemoteRunnable(\"http://localhost:8000/openai/\")\nanthropic = RemoteRunnable(\"http://localhost:8000/anthropic/\")"]
},
{
"cell_type": "markdown",
@@ -48,18 +41,7 @@
"tags": []
},
"outputs": [],
"source": [
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a highly educated person who loves to use big words. \"\n",
" + \"You are also concise. Never answer in more than three sentences.\",\n",
" ),\n",
" (\"human\", \"Tell me about your favorite novel\"),\n",
" ]\n",
").format_messages()"
]
"source": ["prompt = ChatPromptTemplate.from_messages(\n [\n (\n \"system\",\n \"You are a highly educated person who loves to use big words. \"\n + \"You are also concise. Never answer in more than three sentences.\",\n ),\n (\"human\", \"Tell me about your favorite novel\"),\n ]\n).format_messages()"]
},
{
"cell_type": "markdown",
@@ -86,9 +68,7 @@
"output_type": "execute_result"
}
],
"source": [
"anthropic.invoke(prompt)"
]
"source": ["anthropic.invoke(prompt)"]
},
{
"cell_type": "code",
@@ -97,9 +77,7 @@
"tags": []
},
"outputs": [],
"source": [
"openai_llm.invoke(prompt)"
]
"source": ["openai_llm.invoke(prompt)"]
},
{
"cell_type": "markdown",
@@ -126,9 +104,7 @@
"output_type": "execute_result"
}
],
"source": [
"await openai_llm.ainvoke(prompt)"
]
"source": ["await openai_llm.ainvoke(prompt)"]
},
{
"cell_type": "code",
@@ -149,9 +125,7 @@
"output_type": "execute_result"
}
],
"source": [
"anthropic.batch([prompt, prompt])"
]
"source": ["anthropic.batch([prompt, prompt])"]
},
{
"cell_type": "code",
@@ -172,9 +146,7 @@
"output_type": "execute_result"
}
],
"source": [
"await anthropic.abatch([prompt, prompt])"
]
"source": ["await anthropic.abatch([prompt, prompt])"]
},
{
"cell_type": "markdown",
@@ -198,10 +170,7 @@
]
}
],
"source": [
"for chunk in anthropic.stream(prompt):\n",
" print(chunk.content, end=\"\", flush=True)"
]
"source": ["for chunk in anthropic.stream(prompt):\n print(chunk.content, end=\"\", flush=True)"]
},
{
"cell_type": "code",
@@ -218,19 +187,14 @@
]
}
],
"source": [
"async for chunk in anthropic.astream(prompt):\n",
" print(chunk.content, end=\"\", flush=True)"
]
"source": ["async for chunk in anthropic.astream(prompt):\n print(chunk.content, end=\"\", flush=True)"]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"from langchain.schema.runnable import RunnablePassthrough"
]
"source": ["from langchain_core.runnables import RunnablePassthrough"]
},
{
"cell_type": "code",
@@ -239,37 +203,7 @@
"tags": []
},
"outputs": [],
"source": [
"comedian_chain = (\n",
" ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"You are a comedian that sometimes tells funny jokes and other times you just state facts that are not funny. Please either tell a joke or state fact now but only output one.\",\n",
" ),\n",
" ]\n",
" )\n",
" | openai_llm\n",
")\n",
"\n",
"joke_classifier_chain = (\n",
" ChatPromptTemplate.from_messages(\n",
" [\n",
" (\n",
" \"system\",\n",
" \"Please determine if the joke is funny. Say `funny` if it's funny and `not funny` if not funny. Then repeat the first five words of the joke for reference...\",\n",
" ),\n",
" (\"human\", \"{joke}\"),\n",
" ]\n",
" )\n",
" | anthropic\n",
")\n",
"\n",
"\n",
"chain = {\"joke\": comedian_chain} | RunnablePassthrough.assign(\n",
" classification=joke_classifier_chain\n",
")"
]
"source": ["comedian_chain = (\n ChatPromptTemplate.from_messages(\n [\n (\n \"system\",\n \"You are a comedian that sometimes tells funny jokes and other times you just state facts that are not funny. Please either tell a joke or state fact now but only output one.\",\n ),\n ]\n )\n | openai_llm\n)\n\njoke_classifier_chain = (\n ChatPromptTemplate.from_messages(\n [\n (\n \"system\",\n \"Please determine if the joke is funny. Say `funny` if it's funny and `not funny` if not funny. Then repeat the first five words of the joke for reference...\",\n ),\n (\"human\", \"{joke}\"),\n ]\n )\n | anthropic\n)\n\n\nchain = {\"joke\": comedian_chain} | RunnablePassthrough.assign(\n classification=joke_classifier_chain\n)"]
},
{
"cell_type": "code",
@@ -290,9 +224,7 @@
"output_type": "execute_result"
}
],
"source": [
"chain.invoke({})"
]
"source": ["chain.invoke({})"]
}
],
"metadata": {
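Likewise, the updated llm client cells reduce to roughly the following. This is a sketch with the long system prompts condensed; it assumes the example server exposes the /openai and /anthropic routes used above.

from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langserve import RemoteRunnable

openai_llm = RemoteRunnable("http://localhost:8000/openai/")
anthropic = RemoteRunnable("http://localhost:8000/anthropic/")

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are concise. Never answer in more than three sentences."),
        ("human", "Tell me about your favorite novel"),
    ]
).format_messages()

# Sync invoke, batch, and token streaming against the remote model
print(anthropic.invoke(prompt))
print(anthropic.batch([prompt, prompt]))
for chunk in anthropic.stream(prompt):
    print(chunk.content, end="", flush=True)

# Remote runnables compose like local ones in LCEL pipelines
comedian_chain = (
    ChatPromptTemplate.from_messages(
        [("system", "Tell either a joke or an unfunny fact, but only output one.")]
    )
    | openai_llm
)
joke_classifier_chain = (
    ChatPromptTemplate.from_messages(
        [("system", "Say `funny` or `not funny` for the joke below."), ("human", "{joke}")]
    )
    | anthropic
)
chain = {"joke": comedian_chain} | RunnablePassthrough.assign(
    classification=joke_classifier_chain
)
print(chain.invoke({}))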
