
Commit

v1->v2; Add Human:/Assistant: formatting
zack-anthropic committed Sep 18, 2023
1 parent 57de851 commit ef54f91
Showing 10 changed files with 73 additions and 45 deletions.
6 changes: 3 additions & 3 deletions 01_Generation/00_generate_w_bedrock.ipynb
@@ -117,7 +117,7 @@
"source": [
"## Generate text\n",
"\n",
"Following on the use case explained above, let's prepare an input for the Amazon Bedrock service to generate an email"
"Following on the use case explained above, let's prepare an input for the Amazon Bedrock service to generate an email. Note that this prompt would need to be modified with [Human:/Assistant: formatting for Claude.](https://docs.anthropic.com/claude/docs/human-and-assistant-formatting)"
]
},
{
@@ -181,8 +181,8 @@
"- `amazon.titan-tg1-large`\n",
"- `ai21.j2-grande-instruct`\n",
"- `ai21.j2-jumbo-instruct`\n",
"- `anthropic.claude-instant-v1`\n",
"- `anthropic.claude-v1`"
"- `anthropic.claude-instant-v2`\n",
"- `anthropic.claude-v2`"
]
},
{
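For reference, the Human:/Assistant: framing this commit adds looks like the following minimal sketch when calling Bedrock directly. The client name `boto3_bedrock`, the email instruction, and the sampling parameters are illustrative assumptions, not lines from this diff:

```python
import json

# A Claude prompt must start with "\n\nHuman:" and end with "\n\nAssistant:".
prompt = """

Human: Write an email from Bob, Customer Service Manager, thanking a customer
for their constructive feedback.

Assistant:"""

body = json.dumps({
    "prompt": prompt,
    "max_tokens_to_sample": 300,  # required for Claude; value is illustrative
    "temperature": 0.5,           # illustrative
})

# boto3_bedrock: the Bedrock client these notebooks create earlier.
response = boto3_bedrock.invoke_model(
    body=body,
    modelId="anthropic.claude-v2",
    accept="application/json",
    contentType="application/json",
)
completion = json.loads(response["body"].read())["completion"]
print(completion)
```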
14 changes: 9 additions & 5 deletions 01_Generation/01_zero_shot_generation.ipynb
@@ -126,8 +126,8 @@
"- amazon.titan-tg1-large\n",
"- ai21.j2-grande-instruct\n",
"- ai21.j2-jumbo-instruct\n",
"- anthropic.claude-instant-v1\n",
"- anthropic.claude-v1\n",
"- anthropic.claude-instant-v2\n",
"- anthropic.claude-v2\n",
"\n",
"Note that different models support different `model_kwargs`."
]
@@ -152,7 +152,7 @@
"}\n",
"\n",
"textgen_llm = Bedrock(\n",
" model_id=\"anthropic.claude-v1\",\n",
" model_id=\"anthropic.claude-v2\",\n",
" client=boto3_bedrock,\n",
" model_kwargs=inference_modifier,\n",
")"
@@ -198,9 +198,13 @@
},
"outputs": [],
"source": [
"response = textgen_llm(\"\"\"Write an email from Bob, Customer Service Manager, \n",
"response = textgen_llm(\"\"\"\n",
"\n",
"Human: Write an email from Bob, Customer Service Manager, \n",
"to the customer \"John Doe\" that provided negative feedback on the service \n",
"provided by our customer support engineer.\\n\\nHuman:\"\"\")\n",
"provided by our customer support engineer.\n",
"\n",
"Assistant:\"\"\")\n",
"\n",
"print_ww(response)"
]
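Since every Claude call in these notebooks now needs the same framing, a tiny helper can keep prompts tidy. The `to_claude_prompt` function below is a hypothetical convenience, not part of the workshop code; `textgen_llm` and `print_ww` are the objects the notebook defines above:

```python
def to_claude_prompt(instruction: str) -> str:
    # Wrap a bare instruction in the Human:/Assistant: turns Claude expects.
    return f"\n\nHuman: {instruction.strip()}\n\nAssistant:"

response = textgen_llm(to_claude_prompt(
    'Write an email from Bob, Customer Service Manager, to the customer "John Doe" '
    "who provided negative feedback on the service provided by our customer support engineer."
))
print_ww(response)
```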
14 changes: 8 additions & 6 deletions 01_Generation/02_contextual_generation.ipynb
@@ -134,8 +134,8 @@
"- amazon.titan-tg1-large\n",
"- ai21.j2-grande-instruct\n",
"- ai21.j2-jumbo-instruct\n",
"- anthropic.claude-instant-v1\n",
"- anthropic.claude-v1\n",
"- anthropic.claude-instant-v2\n",
"- anthropic.claude-v2\n",
"\n",
"Note that different models support different `model_kwargs`."
]
@@ -158,7 +158,7 @@
" \"stop_sequences\": [\"\\n\\nHuman\"]\n",
" }\n",
"\n",
"textgen_llm = Bedrock(model_id = \"anthropic.claude-v1\",\n",
"textgen_llm = Bedrock(model_id = \"anthropic.claude-v2\",\n",
" client = boto3_bedrock, \n",
" model_kwargs = inference_modifier \n",
" )\n"
@@ -190,9 +190,11 @@
"# Create a prompt template that has multiple input variables\n",
"multi_var_prompt = PromptTemplate(\n",
" input_variables=[\"customerServiceManager\", \"customerName\", \"feedbackFromCustomer\"], \n",
" template=\"\"\"Create an apology email from the Service Manager {customerServiceManager} to {customerName}. \n",
" in response to the following feedback that was received from the customer: {feedbackFromCustomer}.\n",
" \"\"\"\n",
" template=\"\"\"\n",
"\n",
"Human: Create an apology email from the Service Manager {customerServiceManager} to {customerName} in response to the following feedback that was received from the customer: {feedbackFromCustomer}.\n",
"\n",
"Assistant:\"\"\"\n",
")\n",
"\n",
"# Pass in values to the input variables\n",
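The hunk above ends just before the template variables are filled in; the completed call plausibly looks like this sketch, with invented feedback text for illustration:

```python
# Format the template; the result already carries the Human:/Assistant: framing.
prompt = multi_var_prompt.format(
    customerServiceManager="Bob",
    customerName="John Doe",
    feedbackFromCustomer=(
        "The support engineer was unhelpful and my issue is still unresolved."  # illustrative
    ),
)

response = textgen_llm(prompt)
print_ww(response)
```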
9 changes: 5 additions & 4 deletions 02_Summarization/01.small-text-summarization-claude.ipynb
@@ -138,7 +138,7 @@
" \"top_p\":0.5,\n",
" \"stop_sequences\":[]\n",
" },\n",
" modelId=\"anthropic.claude-v1\", \n",
" modelId=\"anthropic.claude-v2\", \n",
" accept=accept, \n",
" contentType=contentType)\n",
"\n",
@@ -161,7 +161,8 @@
"outputs": [],
"source": [
"prompt = \"\"\"\n",
"Please provide a summary of the following text.\n",
"\n",
"Human: Please provide a summary of the following text.\n",
"\n",
"AWS took all of that feedback from customers, and today we are excited to announce Amazon Bedrock, \\\n",
"a new service that makes FMs from AI21 Labs, Anthropic, Stability AI, and Amazon accessible via an API. \\\n",
@@ -174,7 +175,7 @@
"tools and capabilities they are familiar with, without having to manage any infrastructure (including integrations \\\n",
"with Amazon SageMaker ML features like Experiments to test different models and Pipelines to manage their FMs at scale).\n",
"\n",
"\"\"\""
"Assistant:\"\"\""
]
},
{
@@ -224,7 +225,7 @@
},
"outputs": [],
"source": [
"modelId = 'anthropic.claude-v1' # change this to use a different version from the model provider\n",
"modelId = 'anthropic.claude-v2' # change this to use a different version from the model provider\n",
"accept = 'application/json'\n",
"contentType = 'application/json'\n",
"\n",
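For completeness, Claude on Bedrock returns a JSON body whose `completion` field holds the generated text. A sketch of the full round trip, reusing the `prompt`, `modelId`, `accept`, and `contentType` values defined in the cells above; the client name `boto3_bedrock` and the sampling values are assumptions:

```python
import json

body = json.dumps({
    "prompt": prompt,              # the Human:/Assistant: summarization prompt above
    "max_tokens_to_sample": 1000,  # illustrative
    "temperature": 0.5,
    "top_p": 0.5,
    "stop_sequences": [],
})

response = boto3_bedrock.invoke_model(
    body=body, modelId=modelId, accept=accept, contentType=contentType
)
response_body = json.loads(response["body"].read())
print(response_body["completion"])
```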
20 changes: 14 additions & 6 deletions 03_QuestionAnswering/01_qa_w_rag_claude.ipynb
@@ -171,7 +171,7 @@
"- `ai21.j2-grande-instruct`\n",
"- `ai21.j2-jumbo-instruct`\n",
"- `anthropic.claude-instant-v1`\n",
"- `anthropic.claude-v1`"
"- `anthropic.claude-v2`"
]
},
{
@@ -185,7 +185,7 @@
"from langchain.llms.bedrock import Bedrock\n",
"\n",
"# - create the Anthropic Model\n",
"llm = Bedrock(model_id=\"anthropic.claude-v1\", client=boto3_bedrock, model_kwargs={'max_tokens_to_sample':200})\n",
"llm = Bedrock(model_id=\"anthropic.claude-v2\", client=boto3_bedrock, model_kwargs={'max_tokens_to_sample':200})\n",
"bedrock_embeddings = BedrockEmbeddings(client=boto3_bedrock)"
]
},
@@ -386,7 +386,7 @@
"### Quick way\n",
"You have the possibility to use the wrapper provided by LangChain which wraps around the Vector Store and takes input the LLM.\n",
"This wrapper performs the following steps behind the scences:\n",
"- Takes input the question\n",
"- Take the question as input\n",
"- Create question embedding\n",
"- Fetch relevant documents\n",
"- Stuff the documents and the question into a prompt\n",
@@ -436,7 +436,7 @@
"### Customisable option\n",
"In the above scenario you explored the quick and easy way to get a context-aware answer to your question. Now let's have a look at a more customizable option with the helpf of [RetrievalQA](https://python.langchain.com/en/latest/modules/chains/index_examples/vector_db_qa.html) where you can customize how the documents fetched should be added to prompt using `chain_type` parameter. Also, if you want to control how many relevant documents should be retrieved then change the `k` parameter in the cell below to see different outputs. In many scenarios you might want to know which were the source documents that the LLM used to generate the answer, you can get those documents in the output using `return_source_documents` which returns the documents that are added to the context of the LLM prompt. `RetrievalQA` also allows you to provide a custom [prompt template](https://python.langchain.com/en/latest/modules/prompts/prompt_templates/getting_started.html) which can be specific to the model.\n",
"\n",
"Note: In this example we are using Anthropic Claude as the LLM under Amazon Bedrock, this particular model performs best if the inputs are provided under `Human:` and the model is requested to generate an output after `Assistant:`. In the cell below you see an example of how to control the prompt such that the LLM stays grounded and doesn't answer outside the context."
"Note: In this example we are using Anthropic Claude as the LLM under Amazon Bedrock. This particular model [performs best](https://docs.anthropic.com/claude/docs/human-and-assistant-formatting) if the inputs are provided under `Human:` and the model is requested to generate an output after `Assistant:`. In the cell below you see an example of how to control the prompt such that the LLM stays grounded and doesn't answer outside the context."
]
},
{
@@ -448,11 +448,14 @@
"from langchain.chains import RetrievalQA\n",
"from langchain.prompts import PromptTemplate\n",
"\n",
"prompt_template = \"\"\"Human: Use the following pieces of context to provide a concise answer to the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n",
"prompt_template = \"\"\"\n",
"\n",
"Human: Use the following pieces of context to provide a concise answer to the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\n",
"\n",
"{context}\n",
"\n",
"Question: {question}\n",
"\n",
"Assistant:\"\"\"\n",
"\n",
"PROMPT = PromptTemplate(\n",
@@ -487,7 +490,7 @@
"metadata": {},
"source": [
"## Conclusion\n",
"Congratulations on completing this moduel on retrieval augmented generation! This is an important technique that combines the power of large language models with the precision of retrieval methods. By augmenting generation with relevant retrieved examples, the responses we recieved become more coherent, consistent and grounded. You should feel proud of learning this innovative approach. I'm sure the knowledge you've gained will be very useful for building creative and engaging language generation systems. Well done!\n",
"Congratulations on completing this module on retrieval augmented generation! This is an important technique that combines the power of large language models with the precision of retrieval methods. By augmenting generation with relevant retrieved examples, the responses we recieved become more coherent, consistent and grounded. You should feel proud of learning this innovative approach. I'm sure the knowledge you've gained will be very useful for building creative and engaging language generation systems. Well done!\n",
"\n",
"In the above implementation of RAG based Question Answering we have explored the following concepts and how to implement them using Amazon Bedrock and it's LangChain integration.\n",
"\n",
@@ -504,6 +507,11 @@
"\n",
"# Thank You"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": []
}
],
"metadata": {
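Wiring the customisable option together plausibly looks like the sketch below. `vectorstore_faiss` stands in for whatever vector store the elided cells build, `k=3` and the question are illustrative, and `llm`, `PROMPT`, and `print_ww` come from the cells above:

```python
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",  # stuff the retrieved documents directly into the prompt
    retriever=vectorstore_faiss.as_retriever(search_kwargs={"k": 3}),
    return_source_documents=True,  # surface the documents used as context
    chain_type_kwargs={"prompt": PROMPT},  # the Human:/Assistant: template above
)

result = qa({"query": "What is Amazon Bedrock?"})  # illustrative question
print_ww(result["result"])
for doc in result["source_documents"]:
    print_ww(doc.page_content[:200])  # peek at the grounding passages
```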
8 changes: 4 additions & 4 deletions 04_Chatbot/00_Chatbot_Claude.ipynb
@@ -156,7 +156,7 @@
"from langchain.memory import ConversationBufferMemory\n",
"\n",
"cl_llm = Bedrock(\n",
" model_id=\"anthropic.claude-v1\",\n",
" model_id=\"anthropic.claude-v2\",\n",
" client=boto3_bedrock,\n",
" model_kwargs={\"max_tokens_to_sample\": 1000},\n",
")\n",
@@ -172,7 +172,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"What happens here? We said \"Hi there!\" and the model spat out a several conversations. This is due to the fact that the default prompt used by Langchain ConversationChain is not well designed for Claude. An [effective Claude prompt](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design) should end with `\\n\\nHuman\\n\\nAassistant:`. Let's fix this.\n",
"What happens here? We said \"Hi there!\" and the model spat out a several conversations. This is due to the fact that the default prompt used by Langchain ConversationChain is not well designed for Claude. An [effective Claude prompt](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design) should contain `\\n\\nHuman:` at the beginning and also contain `\\n\\nAssistant:` in the prompt sometime after the `\\n\\nHuman:` (optionally followed by other text that you want to [put in Claude's mouth](https://docs.anthropic.com/claude/docs/human-and-assistant-formatting#use-human-and-assistant-to-put-words-in-claudes-mouth)). Let's fix this.\n",
"\n",
"To learn more about how to write prompts for Claude, check [Anthropic documentation](https://docs.anthropic.com/claude/docs/introduction-to-prompt-design)."
]
@@ -410,7 +410,7 @@
"memory = ConversationBufferMemory()\n",
"memory.chat_memory.add_user_message(\"You will be acting as a career coach. Your goal is to give career advice to users\")\n",
"memory.chat_memory.add_ai_message(\"I am a career coach and give career advice\")\n",
"cl_llm = Bedrock(model_id=\"anthropic.claude-v1\",client=boto3_bedrock)\n",
"cl_llm = Bedrock(model_id=\"anthropic.claude-v2\",client=boto3_bedrock)\n",
"conversation = ConversationChain(\n",
" llm=cl_llm, verbose=True, memory=memory\n",
")\n",
@@ -713,7 +713,7 @@
"Assistant: Question:\"\"\")\n",
"\n",
"# recreate the Claude LLM with more tokens to sample - this provides longer responses but introduces some latency\n",
"cl_llm = Bedrock(model_id=\"anthropic.claude-v1\", client=boto3_bedrock, model_kwargs={\"max_tokens_to_sample\": 500})\n",
"cl_llm = Bedrock(model_id=\"anthropic.claude-v2\", client=boto3_bedrock, model_kwargs={\"max_tokens_to_sample\": 500})\n",
"memory_chain = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n",
"qa = ConversationalRetrievalChain.from_llm(\n",
" llm=cl_llm, \n",
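The fix the chatbot notebook alludes to is swapping the ConversationChain's default template for a Claude-friendly one. A sketch of what that can look like — the wording of the template is illustrative, while `cl_llm`, `ConversationChain`, `ConversationBufferMemory`, and `print_ww` come from the notebook itself:

```python
from langchain.prompts import PromptTemplate

claude_prompt = PromptTemplate.from_template("""

Human: The following is a friendly conversation between a human and an AI.
If the AI does not know the answer to a question, it truthfully says so.

Current conversation:
<conversation_history>
{history}
</conversation_history>

Here is the human's next reply:
<human_reply>
{input}
</human_reply>

Assistant:""")

conversation = ConversationChain(
    llm=cl_llm, verbose=True, memory=ConversationBufferMemory()
)
conversation.prompt = claude_prompt  # replace the default, Claude-unfriendly template
print_ww(conversation.predict(input="Hi there!"))
```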
2 changes: 2 additions & 0 deletions 06_CodeGeneration/00_code_generatation_w_bedrock.ipynb
@@ -213,6 +213,7 @@
"# Analyzing sales\n",
"\n",
"prompt_data = \"\"\"\n",
"\n",
"Human: You have a CSV, sales.csv, with columns:\n",
"- date (YYYY-MM-DD)\n",
"- product_id\n",
@@ -227,6 +228,7 @@
"- Visualize monthly sales using a bar chart\n",
"\n",
"Ensure the code is syntactically correct, bug-free, optimized, not span multiple lines unnessarily, and prefer to use standard libraries. Return only python code without any surrounding text, explanation or context.\n",
"\n",
"Assistant:\n",
"\"\"\""
]
9 changes: 6 additions & 3 deletions 06_CodeGeneration/01_sql_query_generate_w_bedrock.ipynb
@@ -131,13 +131,14 @@
"source": [
"# create the prompt to generate SQL query\n",
"prompt_data = \"\"\"\n",
"Command: Human: AnyCompany has a database with a table named sales_data containing sales records. The table has following columns:\n",
"\n",
"Human: AnyCompany has a database with a table named sales_data containing sales records. The table has following columns:\n",
"- date (YYYY-MM-DD)\n",
"- product_id\n",
"- price\n",
"- units_sold\n",
"\n",
"Can you generate SQL queries for below: \n",
"Can you generate SQL queries for the below: \n",
"- Identify the top 5 best selling products by total sales for the year 2023\n",
"- Calculate the monthly average sales for the year 2023\n",
"\n",
@@ -227,7 +228,9 @@
"outputs": [],
"source": [
"# create the prompt\n",
"prompt_sql_data = \"\"\"Command: You're provided with a database schema representing any hospital's patient management system.\n",
"prompt_sql_data = \"\"\"\n",
"\n",
"Human: You're provided with a database schema representing any hospital's patient management system.\n",
"The system holds records about patients, their prescriptions, doctors, and the medications prescribed.\n",
"\n",
"Here's the schema:\n",
18 changes: 11 additions & 7 deletions 06_CodeGeneration/02_code_interpret_w_langchain.ipynb
@@ -130,8 +130,8 @@
"- amazon.titan-tg1-large\n",
"- ai21.j2-grande-instruct\n",
"- ai21.j2-jumbo-instruct\n",
"- anthropic.claude-instant-v1\n",
"- anthropic.claude-v1\n",
"- anthropic.claude-instant-v2\n",
"- anthropic.claude-v2\n",
"\n",
"Note that different models support different `model_kwargs`."
]
@@ -275,11 +275,15 @@
"# Create a prompt template that has multiple input variables\n",
"multi_var_prompt = PromptTemplate(\n",
" input_variables=[\"code\", \"programmingLanguage\"], \n",
" template=\"\"\"Human: You will be acting as an expert software developer in {programmingLanguage}. \n",
" You will explain below code and highlight if any red flags or not following best practices.\n",
" {code}\n",
" Assistant: \n",
" \"\"\"\n",
" template=\"\"\"\n",
"\n",
"Human: You will be acting as an expert software developer in {programmingLanguage}. \n",
"You will explain the below code and highlight if there are any red flags or where best practices are not being followed.\n",
"<code>\n",
"{code}\n",
"</code>\n",
"\n",
"Assistant:\"\"\"\n",
")\n",
"\n",
"# Pass in values to the input variables\n",
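As with the earlier templates, the remaining step is to fill the variables and invoke the model; an `LLMChain` keeps that in one place. The sketch below assumes the notebook's Bedrock LLM is named `textgen_llm` and uses an invented snippet as the code to review:

```python
from langchain.chains import LLMChain

chain = LLMChain(llm=textgen_llm, prompt=multi_var_prompt)

sample_code = """
def fib(n):
    return n if n < 2 else fib(n - 1) + fib(n - 2)
"""  # illustrative code to have explained

print_ww(chain.run(code=sample_code, programmingLanguage="Python"))
```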
18 changes: 11 additions & 7 deletions 06_CodeGeneration/03_code_translate_w_langchain.ipynb
@@ -129,8 +129,8 @@
"- amazon.titan-tg1-large\n",
"- ai21.j2-grande-instruct\n",
"- ai21.j2-jumbo-instruct\n",
"- anthropic.claude-instant-v1\n",
"- anthropic.claude-v1\n",
"- anthropic.claude-instant-v2\n",
"- anthropic.claude-v2\n",
"\n",
"Note that different models support different `model_kwargs`."
]
@@ -274,11 +274,15 @@
"# Create a prompt template that has multiple input variables\n",
"multi_var_prompt = PromptTemplate(\n",
" input_variables=[\"code\", \"srcProgrammingLanguage\", \"targetProgrammingLanguage\"], \n",
" template=\"\"\"Human: You will be acting as an expert software developer in {srcProgrammingLanguage} and {targetProgrammingLanguage}. \n",
" You will tranlslate below code from {srcProgrammingLanguage} to {targetProgrammingLanguage} while following coding best practices.\n",
" {code}\n",
" Assistant: \n",
" \"\"\"\n",
" template=\"\"\"\n",
"\n",
"Human: You will be acting as an expert software developer in {srcProgrammingLanguage} and {targetProgrammingLanguage}. \n",
"You will tranlslate below code from {srcProgrammingLanguage} to {targetProgrammingLanguage} while following coding best practices.\n",
"<code>\n",
"{code}\n",
"</code>\n",
"\n",
"Assistant: \"\"\"\n",
")\n",
"\n",
"# Pass in values to the input variables\n",
Expand Down
