From dfd21d0422ab799fdb4fca456451a3e87ef0cabe Mon Sep 17 00:00:00 2001
From: Stainless Bot <107565488+stainless-bot@users.noreply.github.com>
Date: Mon, 11 Dec 2023 10:24:10 +0000
Subject: [PATCH] chore(internal): update test examples (#98)

---
 README.md                                | 16 +++---
 .../resources/completions.py             | 56 ++++++++++++++++---
 .../types/completion_create_params.py    |  9 ++-
 tests/api_resources/test_completions.py  | 24 ++++----
 tests/test_client.py                     | 24 ++++----
 5 files changed, 88 insertions(+), 41 deletions(-)

diff --git a/README.md b/README.md
index b970465..6690b80 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ client = AnthropicBedrock(
 )
 
 completion = client.completions.create(
-    model="anthropic.claude-v2",
+    model="anthropic.claude-v2:1",
     max_tokens_to_sample=256,
     prompt=f"{anthropic_bedrock.HUMAN_PROMPT} how does a court case get to the Supreme Court? {anthropic_bedrock.AI_PROMPT}",
 )
@@ -62,7 +62,7 @@ client = AsyncAnthropicBedrock()
 
 async def main():
     completion = await client.completions.create(
-        model="anthropic.claude-v2",
+        model="anthropic.claude-v2:1",
         max_tokens_to_sample=256,
         prompt=f"{anthropic_bedrock.HUMAN_PROMPT} how does a court case get to the Supreme Court? {anthropic_bedrock.AI_PROMPT}",
     )
@@ -86,7 +86,7 @@ client = AnthropicBedrock()
 stream = client.completions.create(
     prompt=f"{HUMAN_PROMPT} Your prompt here{AI_PROMPT}",
     max_tokens_to_sample=300,
-    model="anthropic.claude-v2",
+    model="anthropic.claude-v2:1",
     stream=True,
 )
 for completion in stream:
@@ -103,7 +103,7 @@ client = AsyncAnthropicBedrock()
 stream = await client.completions.create(
     prompt=f"{HUMAN_PROMPT} Your prompt here{AI_PROMPT}",
     max_tokens_to_sample=300,
-    model="anthropic.claude-v2",
+    model="anthropic.claude-v2:1",
     stream=True,
 )
 async for completion in stream:
@@ -147,7 +147,7 @@ try:
     client.completions.create(
         prompt=f"{anthropic_bedrock.HUMAN_PROMPT} Your prompt here {anthropic_bedrock.AI_PROMPT}",
         max_tokens_to_sample=256,
-        model="anthropic.claude-v2",
+        model="anthropic.claude-v2:1",
     )
 except anthropic_bedrock.APIConnectionError as e:
     print("The server could not be reached")
@@ -194,7 +194,7 @@ client = AnthropicBedrock(
 )
 
 client.with_options(max_retries=5).completions.create(
     prompt=f"{HUMAN_PROMPT} Can you help me effectively ask for a raise at work?{AI_PROMPT}",
     max_tokens_to_sample=300,
-    model="anthropic.claude-v2",
+    model="anthropic.claude-v2:1",
 )
@@ -221,7 +221,7 @@ client = AnthropicBedrock(
 )
 
 client.with_options(timeout=5 * 1000).completions.create(
     prompt=f"{HUMAN_PROMPT} Where can I get a good coffee in my neighbourhood?{AI_PROMPT}",
     max_tokens_to_sample=300,
-    model="anthropic.claude-v2",
+    model="anthropic.claude-v2:1",
 )
@@ -265,7 +265,7 @@ client = AnthropicBedrock()
 
 response = client.completions.with_raw_response.create(
     prompt=f"{HUMAN_PROMPT} Your prompt here{AI_PROMPT}",
     max_tokens_to_sample=300,
-    model="anthropic.claude-v2",
+    model="anthropic.claude-v2:1",
 )
 print(response.headers.get('X-My-Header'))
diff --git a/src/anthropic_bedrock/resources/completions.py b/src/anthropic_bedrock/resources/completions.py
index e95752b..9231357 100644
--- a/src/anthropic_bedrock/resources/completions.py
+++ b/src/anthropic_bedrock/resources/completions.py
@@ -32,7 +32,12 @@ def __init__(self, client: AnthropicBedrock) -> None:
     def create(
         self,
         *,
-        model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]],
+        model: Union[
+            str,
+            Literal[
+                "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"
"anthropic.claude-instant-v1" + ], + ], max_tokens_to_sample: int, prompt: str, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -108,7 +113,12 @@ def create( def create( self, *, - model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]], + model: Union[ + str, + Literal[ + "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1" + ], + ], max_tokens_to_sample: int, prompt: str, stream: Literal[True], @@ -184,7 +194,12 @@ def create( def create( self, *, - model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]], + model: Union[ + str, + Literal[ + "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1" + ], + ], max_tokens_to_sample: int, prompt: str, stream: bool, @@ -260,7 +275,12 @@ def create( def create( self, *, - model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]], + model: Union[ + str, + Literal[ + "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1" + ], + ], max_tokens_to_sample: int, prompt: str, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -309,7 +329,12 @@ def __init__(self, client: AsyncAnthropicBedrock) -> None: async def create( self, *, - model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]], + model: Union[ + str, + Literal[ + "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1" + ], + ], max_tokens_to_sample: int, prompt: str, stop_sequences: List[str] | NotGiven = NOT_GIVEN, @@ -385,7 +410,12 @@ async def create( async def create( self, *, - model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]], + model: Union[ + str, + Literal[ + "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1" + ], + ], max_tokens_to_sample: int, prompt: str, stream: Literal[True], @@ -461,7 +491,12 @@ async def create( async def create( self, *, - model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]], + model: Union[ + str, + Literal[ + "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1" + ], + ], max_tokens_to_sample: int, prompt: str, stream: bool, @@ -537,7 +572,12 @@ async def create( async def create( self, *, - model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]], + model: Union[ + str, + Literal[ + "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1" + ], + ], max_tokens_to_sample: int, prompt: str, stop_sequences: List[str] | NotGiven = NOT_GIVEN, diff --git a/src/anthropic_bedrock/types/completion_create_params.py b/src/anthropic_bedrock/types/completion_create_params.py index 9d9ffba..90d4af3 100644 --- a/src/anthropic_bedrock/types/completion_create_params.py +++ b/src/anthropic_bedrock/types/completion_create_params.py @@ -9,7 +9,14 @@ class CompletionCreateParamsBase(TypedDict, total=False): - model: Required[Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]]] + model: Required[ + Union[ + str, + Literal[ + "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1" + ], + ] + ] max_tokens_to_sample: Required[int] 
"""The maximum number of tokens to generate before stopping. diff --git a/tests/api_resources/test_completions.py b/tests/api_resources/test_completions.py index 2d23537..ed6afc3 100644 --- a/tests/api_resources/test_completions.py +++ b/tests/api_resources/test_completions.py @@ -37,7 +37,7 @@ class TestCompletions: @parametrize def test_method_create_overload_1(self, client: AnthropicBedrock) -> None: completion = client.completions.create( - model="anthropic.claude-v2", + model="anthropic.claude-v2:1", max_tokens_to_sample=256, prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) @@ -46,7 +46,7 @@ def test_method_create_overload_1(self, client: AnthropicBedrock) -> None: @parametrize def test_method_create_with_all_params_overload_1(self, client: AnthropicBedrock) -> None: completion = client.completions.create( - model="anthropic.claude-v2", + model="anthropic.claude-v2:1", max_tokens_to_sample=256, prompt="\n\nHuman: Hello, world!\n\nAssistant:", stop_sequences=["string", "string", "string"], @@ -60,7 +60,7 @@ def test_method_create_with_all_params_overload_1(self, client: AnthropicBedrock @parametrize def test_raw_response_create_overload_1(self, client: AnthropicBedrock) -> None: response = client.completions.with_raw_response.create( - model="anthropic.claude-v2", + model="anthropic.claude-v2:1", max_tokens_to_sample=256, prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) @@ -71,7 +71,7 @@ def test_raw_response_create_overload_1(self, client: AnthropicBedrock) -> None: @parametrize def test_method_create_overload_2(self, client: AnthropicBedrock) -> None: client.completions.create( - model="anthropic.claude-v2", + model="anthropic.claude-v2:1", max_tokens_to_sample=256, prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, @@ -80,7 +80,7 @@ def test_method_create_overload_2(self, client: AnthropicBedrock) -> None: @parametrize def test_method_create_with_all_params_overload_2(self, client: AnthropicBedrock) -> None: client.completions.create( - model="anthropic.claude-v2", + model="anthropic.claude-v2:1", max_tokens_to_sample=256, prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, @@ -93,7 +93,7 @@ def test_method_create_with_all_params_overload_2(self, client: AnthropicBedrock @parametrize def test_raw_response_create_overload_2(self, client: AnthropicBedrock) -> None: response = client.completions.with_raw_response.create( - model="anthropic.claude-v2", + model="anthropic.claude-v2:1", max_tokens_to_sample=256, prompt="\n\nHuman: Hello, world!\n\nAssistant:", stream=True, @@ -122,7 +122,7 @@ class TestAsyncCompletions: @parametrize async def test_method_create_overload_1(self, client: AsyncAnthropicBedrock) -> None: completion = await client.completions.create( - model="anthropic.claude-v2", + model="anthropic.claude-v2:1", max_tokens_to_sample=256, prompt="\n\nHuman: Hello, world!\n\nAssistant:", ) @@ -131,7 +131,7 @@ async def test_method_create_overload_1(self, client: AsyncAnthropicBedrock) -> @parametrize async def test_method_create_with_all_params_overload_1(self, client: AsyncAnthropicBedrock) -> None: completion = await client.completions.create( - model="anthropic.claude-v2", + model="anthropic.claude-v2:1", max_tokens_to_sample=256, prompt="\n\nHuman: Hello, world!\n\nAssistant:", stop_sequences=["string", "string", "string"], @@ -145,7 +145,7 @@ async def test_method_create_with_all_params_overload_1(self, client: AsyncAnthr @parametrize async def test_raw_response_create_overload_1(self, client: AsyncAnthropicBedrock) -> None: response = await 
-            model="anthropic.claude-v2",
+            model="anthropic.claude-v2:1",
             max_tokens_to_sample=256,
             prompt="\n\nHuman: Hello, world!\n\nAssistant:",
         )
@@ -156,7 +156,7 @@ async def test_raw_response_create_overload_1(self, client: AsyncAnthropicBedroc
     @parametrize
     async def test_method_create_overload_2(self, client: AsyncAnthropicBedrock) -> None:
         await client.completions.create(
-            model="anthropic.claude-v2",
+            model="anthropic.claude-v2:1",
             max_tokens_to_sample=256,
             prompt="\n\nHuman: Hello, world!\n\nAssistant:",
             stream=True,
@@ -165,7 +165,7 @@ async def test_method_create_overload_2(self, client: AsyncAnthropicBedrock) ->
     @parametrize
     async def test_method_create_with_all_params_overload_2(self, client: AsyncAnthropicBedrock) -> None:
         await client.completions.create(
-            model="anthropic.claude-v2",
+            model="anthropic.claude-v2:1",
             max_tokens_to_sample=256,
             prompt="\n\nHuman: Hello, world!\n\nAssistant:",
             stream=True,
@@ -178,7 +178,7 @@ async def test_method_create_with_all_params_overload_2(self, client: AsyncAnthr
     @parametrize
     async def test_raw_response_create_overload_2(self, client: AsyncAnthropicBedrock) -> None:
         response = await client.completions.with_raw_response.create(
-            model="anthropic.claude-v2",
+            model="anthropic.claude-v2:1",
             max_tokens_to_sample=256,
             prompt="\n\nHuman: Hello, world!\n\nAssistant:",
             stream=True,
diff --git a/tests/test_client.py b/tests/test_client.py
index c04e5d7..a965b19 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -820,9 +820,9 @@ def raise_for_status(response: httpx.Response) -> None:
         with mock.patch("httpx.Response.raise_for_status", raise_for_status):
             with pytest.raises(APITimeoutError):
                 self.client.post(
-                    "/model/anthropic.claude-v2/invoke",
+                    "/model/anthropic.claude-v2:1/invoke",
                     body=dict(
-                        model="anthropic.claude-v2",
+                        model="anthropic.claude-v2:1",
                         max_tokens_to_sample=300,
                         prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
                     ),
@@ -840,9 +840,9 @@ def raise_for_status(_response: httpx.Response) -> None:
         with mock.patch("httpx.Response.raise_for_status", raise_for_status):
             with pytest.raises(APIConnectionError):
                 self.client.post(
-                    "/model/anthropic.claude-v2/invoke",
+                    "/model/anthropic.claude-v2:1/invoke",
                     body=dict(
-                        model="anthropic.claude-v2",
+                        model="anthropic.claude-v2:1",
                         max_tokens_to_sample=300,
                         prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
                     ),
@@ -861,9 +861,9 @@ def raise_for_status(response: httpx.Response) -> None:
         with mock.patch("httpx.Response.raise_for_status", raise_for_status):
             with pytest.raises(APIStatusError):
                 self.client.post(
-                    "/model/anthropic.claude-v2/invoke",
+                    "/model/anthropic.claude-v2:1/invoke",
                     body=dict(
-                        model="anthropic.claude-v2",
+                        model="anthropic.claude-v2:1",
                         max_tokens_to_sample=300,
                         prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
                     ),
@@ -1660,9 +1660,9 @@ def raise_for_status(response: httpx.Response) -> None:
         with mock.patch("httpx.Response.raise_for_status", raise_for_status):
             with pytest.raises(APITimeoutError):
                 await self.client.post(
-                    "/model/anthropic.claude-v2/invoke",
+                    "/model/anthropic.claude-v2:1/invoke",
                     body=dict(
-                        model="anthropic.claude-v2",
+                        model="anthropic.claude-v2:1",
                         max_tokens_to_sample=300,
                         prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
                     ),
@@ -1680,9 +1680,9 @@ def raise_for_status(_response: httpx.Response) -> None:
         with mock.patch("httpx.Response.raise_for_status", raise_for_status):
             with pytest.raises(APIConnectionError):
                 await self.client.post(
-                    "/model/anthropic.claude-v2/invoke",
+                    "/model/anthropic.claude-v2:1/invoke",
                     body=dict(
-                        model="anthropic.claude-v2",
+                        model="anthropic.claude-v2:1",
                         max_tokens_to_sample=300,
                         prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
                     ),
@@ -1701,9 +1701,9 @@ def raise_for_status(response: httpx.Response) -> None:
         with mock.patch("httpx.Response.raise_for_status", raise_for_status):
             with pytest.raises(APIStatusError):
                 await self.client.post(
-                    "/model/anthropic.claude-v2/invoke",
+                    "/model/anthropic.claude-v2:1/invoke",
                     body=dict(
-                        model="anthropic.claude-v2",
+                        model="anthropic.claude-v2:1",
                         max_tokens_to_sample=300,
                         prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
                     ),