chore(internal): update test examples (#98)
stainless-bot committed Dec 13, 2023
1 parent 7e29e3f commit dfd21d0
Showing 5 changed files with 88 additions and 41 deletions.
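For context, the substance of this commit is a single model ID bump: every reference to `anthropic.claude-v2` becomes `anthropic.claude-v2:1`, in the README examples, the `model` parameter's `Literal` types, and the test fixtures. A minimal sketch of the updated call, mirroring the README examples below (assumes the package is installed and AWS credentials are resolvable from the environment):

```python
import anthropic_bedrock
from anthropic_bedrock import AnthropicBedrock

# Credentials and region come from the environment here,
# as in the README examples this commit touches.
client = AnthropicBedrock()

completion = client.completions.create(
    model="anthropic.claude-v2:1",  # the new model ID this commit rolls out
    max_tokens_to_sample=256,
    prompt=f"{anthropic_bedrock.HUMAN_PROMPT} Hello, world!{anthropic_bedrock.AI_PROMPT}",
)
print(completion.completion)  # the generated text
```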
16 changes: 8 additions & 8 deletions README.md
@@ -40,7 +40,7 @@ client = AnthropicBedrock(
)

completion = client.completions.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt=f"{anthropic_bedrock.HUMAN_PROMPT} how does a court case get to the Supreme Court? {anthropic_bedrock.AI_PROMPT}",
)
@@ -62,7 +62,7 @@ client = AsyncAnthropicBedrock()

async def main():
completion = await client.completions.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt=f"{anthropic_bedrock.HUMAN_PROMPT} how does a court case get to the Supreme Court? {anthropic_bedrock.AI_PROMPT}",
)
@@ -86,7 +86,7 @@ client = AnthropicBedrock()
stream = client.completions.create(
prompt=f"{HUMAN_PROMPT} Your prompt here{AI_PROMPT}",
max_tokens_to_sample=300,
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
stream=True,
)
for completion in stream:
@@ -103,7 +103,7 @@ client = AsyncAnthropicBedrock()
stream = await client.completions.create(
prompt=f"{HUMAN_PROMPT} Your prompt here{AI_PROMPT}",
max_tokens_to_sample=300,
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
stream=True,
)
async for completion in stream:
@@ -147,7 +147,7 @@ try:
client.completions.create(
prompt=f"{anthropic_bedrock.HUMAN_PROMPT} Your prompt here {anthropic_bedrock.AI_PROMPT}",
max_tokens_to_sample=256,
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
)
except anthropic_bedrock.APIConnectionError as e:
print("The server could not be reached")
@@ -194,7 +194,7 @@ client = AnthropicBedrock(
client.with_options(max_retries=5).completions.create(
prompt=f"{HUMAN_PROMPT} Can you help me effectively ask for a raise at work?{AI_PROMPT}",
max_tokens_to_sample=300,
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
)
```

@@ -221,7 +221,7 @@ client = AnthropicBedrock(
client.with_options(timeout=5 * 1000).completions.create(
prompt=f"{HUMAN_PROMPT} Where can I get a good coffee in my neighbourhood?{AI_PROMPT}",
max_tokens_to_sample=300,
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
)
```

@@ -265,7 +265,7 @@ client = AnthropicBedrock()
response = client.completions.with_raw_response.create(
prompt=f"{HUMAN_PROMPT} Your prompt here{AI_PROMPT}",
max_tokens_to_sample=300,
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
)
print(response.headers.get('X-My-Header'))

56 changes: 48 additions & 8 deletions src/anthropic_bedrock/resources/completions.py
@@ -32,7 +32,12 @@ def __init__(self, client: AnthropicBedrock) -> None:
def create(
self,
*,
-model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]],
+model: Union[
+    str,
+    Literal[
+        "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"
+    ],
+],
max_tokens_to_sample: int,
prompt: str,
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
@@ -108,7 +113,12 @@ def create(
def create(
self,
*,
-model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]],
+model: Union[
+    str,
+    Literal[
+        "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"
+    ],
+],
max_tokens_to_sample: int,
prompt: str,
stream: Literal[True],
@@ -184,7 +194,12 @@ def create(
def create(
self,
*,
-model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]],
+model: Union[
+    str,
+    Literal[
+        "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"
+    ],
+],
max_tokens_to_sample: int,
prompt: str,
stream: bool,
@@ -260,7 +275,12 @@ def create(
def create(
self,
*,
-model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]],
+model: Union[
+    str,
+    Literal[
+        "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"
+    ],
+],
max_tokens_to_sample: int,
prompt: str,
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
@@ -309,7 +329,12 @@ def __init__(self, client: AsyncAnthropicBedrock) -> None:
async def create(
self,
*,
-model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]],
+model: Union[
+    str,
+    Literal[
+        "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"
+    ],
+],
max_tokens_to_sample: int,
prompt: str,
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
@@ -385,7 +410,12 @@ async def create(
async def create(
self,
*,
-model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]],
+model: Union[
+    str,
+    Literal[
+        "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"
+    ],
+],
max_tokens_to_sample: int,
prompt: str,
stream: Literal[True],
@@ -461,7 +491,12 @@ async def create(
async def create(
self,
*,
-model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]],
+model: Union[
+    str,
+    Literal[
+        "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"
+    ],
+],
max_tokens_to_sample: int,
prompt: str,
stream: bool,
@@ -537,7 +572,12 @@ async def create(
async def create(
self,
*,
-model: Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]],
+model: Union[
+    str,
+    Literal[
+        "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"
+    ],
+],
max_tokens_to_sample: int,
prompt: str,
stop_sequences: List[str] | NotGiven = NOT_GIVEN,
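A note on the typing pattern above: because `model` is `Union[str, Literal[...]]`, adding `"anthropic.claude-v2:1"` to the `Literal` arm only improves editor autocomplete and documentation; arbitrary model IDs were already accepted through the `str` arm. A standalone sketch of the pattern (the alias name is illustrative, not SDK code):

```python
from typing import Literal, Union

# Illustrative alias; the SDK inlines this union in each `create` overload.
ModelId = Union[
    str,
    Literal[
        "anthropic.claude-v2:1",
        "anthropic.claude-v2",
        "anthropic.claude-v1",
        "anthropic.claude-instant-v1",
    ],
]

def create(model: ModelId) -> None:
    """Accepts the enumerated IDs (with autocomplete) or any custom ID."""

create("anthropic.claude-v2:1")    # enumerated: editors can suggest it
create("my-provisioned-model-id")  # still valid via the plain-str arm
```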
9 changes: 8 additions & 1 deletion src/anthropic_bedrock/types/completion_create_params.py
@@ -9,7 +9,14 @@


class CompletionCreateParamsBase(TypedDict, total=False):
-model: Required[Union[str, Literal["anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"]]]
+model: Required[
+    Union[
+        str,
+        Literal[
+            "anthropic.claude-v2:1", "anthropic.claude-v2", "anthropic.claude-v1", "anthropic.claude-instant-v1"
+        ],
+    ]
+]

max_tokens_to_sample: Required[int]
"""The maximum number of tokens to generate before stopping.
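Because the params type is a `TypedDict`, a request body can be written as a plain dict and still be type-checked against the new model list. A minimal sketch, assuming `CompletionCreateParamsBase` is imported from the module shown in this diff (the module may also expose derived streaming/non-streaming variants):

```python
from anthropic_bedrock.types.completion_create_params import CompletionCreateParamsBase

params: CompletionCreateParamsBase = {
    "model": "anthropic.claude-v2:1",  # now part of the Literal list above
    "max_tokens_to_sample": 256,
    "prompt": "\n\nHuman: Hello, world!\n\nAssistant:",
}
```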
24 changes: 12 additions & 12 deletions tests/api_resources/test_completions.py
@@ -37,7 +37,7 @@ class TestCompletions:
@parametrize
def test_method_create_overload_1(self, client: AnthropicBedrock) -> None:
completion = client.completions.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
)
@@ -46,7 +46,7 @@ def test_method_create_overload_1(self, client: AnthropicBedrock) -> None:
@parametrize
def test_method_create_with_all_params_overload_1(self, client: AnthropicBedrock) -> None:
completion = client.completions.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stop_sequences=["string", "string", "string"],
@@ -60,7 +60,7 @@ def test_method_create_with_all_params_overload_1(self, client: AnthropicBedrock
@parametrize
def test_raw_response_create_overload_1(self, client: AnthropicBedrock) -> None:
response = client.completions.with_raw_response.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
)
@@ -71,7 +71,7 @@ def test_raw_response_create_overload_1(self, client: AnthropicBedrock) -> None:
@parametrize
def test_method_create_overload_2(self, client: AnthropicBedrock) -> None:
client.completions.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stream=True,
@@ -80,7 +80,7 @@ def test_method_create_overload_2(self, client: AnthropicBedrock) -> None:
@parametrize
def test_method_create_with_all_params_overload_2(self, client: AnthropicBedrock) -> None:
client.completions.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stream=True,
@@ -93,7 +93,7 @@ def test_method_create_with_all_params_overload_2(self, client: AnthropicBedrock
@parametrize
def test_raw_response_create_overload_2(self, client: AnthropicBedrock) -> None:
response = client.completions.with_raw_response.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stream=True,
@@ -122,7 +122,7 @@ class TestAsyncCompletions:
@parametrize
async def test_method_create_overload_1(self, client: AsyncAnthropicBedrock) -> None:
completion = await client.completions.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
)
@@ -131,7 +131,7 @@ async def test_method_create_overload_1(self, client: AsyncAnthropicBedrock) ->
@parametrize
async def test_method_create_with_all_params_overload_1(self, client: AsyncAnthropicBedrock) -> None:
completion = await client.completions.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stop_sequences=["string", "string", "string"],
@@ -145,7 +145,7 @@ async def test_method_create_with_all_params_overload_1(self, client: AsyncAnthr
@parametrize
async def test_raw_response_create_overload_1(self, client: AsyncAnthropicBedrock) -> None:
response = await client.completions.with_raw_response.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
)
@@ -156,7 +156,7 @@ async def test_raw_response_create_overload_1(self, client: AsyncAnthropicBedroc
@parametrize
async def test_method_create_overload_2(self, client: AsyncAnthropicBedrock) -> None:
await client.completions.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stream=True,
@@ -165,7 +165,7 @@ async def test_method_create_overload_2(self, client: AsyncAnthropicBedrock) ->
@parametrize
async def test_method_create_with_all_params_overload_2(self, client: AsyncAnthropicBedrock) -> None:
await client.completions.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stream=True,
@@ -178,7 +178,7 @@ async def test_method_create_with_all_params_overload_2(self, client: AsyncAnthr
@parametrize
async def test_raw_response_create_overload_2(self, client: AsyncAnthropicBedrock) -> None:
response = await client.completions.with_raw_response.create(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=256,
prompt="\n\nHuman: Hello, world!\n\nAssistant:",
stream=True,
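The test changes above are the same one-token substitution applied to every overload, sync and async. For orientation, a condensed sketch of the sync pattern these tests follow (the fixture is hypothetical; the real suite wires clients through `parametrize`):

```python
import pytest
from anthropic_bedrock import AnthropicBedrock

@pytest.fixture
def client() -> AnthropicBedrock:
    # Hypothetical fixture; the real suite parametrizes clients and transports.
    return AnthropicBedrock(aws_region="us-east-1")

def test_method_create(client: AnthropicBedrock) -> None:
    completion = client.completions.create(
        model="anthropic.claude-v2:1",
        max_tokens_to_sample=256,
        prompt="\n\nHuman: Hello, world!\n\nAssistant:",
    )
    assert completion.completion
```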
24 changes: 12 additions & 12 deletions tests/test_client.py
@@ -820,9 +820,9 @@ def raise_for_status(response: httpx.Response) -> None:
with mock.patch("httpx.Response.raise_for_status", raise_for_status):
with pytest.raises(APITimeoutError):
self.client.post(
"/model/anthropic.claude-v2/invoke",
"/model/anthropic.claude-v2:1/invoke",
body=dict(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=300,
prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
),
@@ -840,9 +840,9 @@ def raise_for_status(_response: httpx.Response) -> None:
with mock.patch("httpx.Response.raise_for_status", raise_for_status):
with pytest.raises(APIConnectionError):
self.client.post(
"/model/anthropic.claude-v2/invoke",
"/model/anthropic.claude-v2:1/invoke",
body=dict(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=300,
prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
),
@@ -861,9 +861,9 @@ def raise_for_status(response: httpx.Response) -> None:
with mock.patch("httpx.Response.raise_for_status", raise_for_status):
with pytest.raises(APIStatusError):
self.client.post(
"/model/anthropic.claude-v2/invoke",
"/model/anthropic.claude-v2:1/invoke",
body=dict(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=300,
prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
),
@@ -1660,9 +1660,9 @@ def raise_for_status(response: httpx.Response) -> None:
with mock.patch("httpx.Response.raise_for_status", raise_for_status):
with pytest.raises(APITimeoutError):
await self.client.post(
"/model/anthropic.claude-v2/invoke",
"/model/anthropic.claude-v2:1/invoke",
body=dict(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=300,
prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
),
@@ -1680,9 +1680,9 @@ def raise_for_status(_response: httpx.Response) -> None:
with mock.patch("httpx.Response.raise_for_status", raise_for_status):
with pytest.raises(APIConnectionError):
await self.client.post(
"/model/anthropic.claude-v2/invoke",
"/model/anthropic.claude-v2:1/invoke",
body=dict(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=300,
prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
),
@@ -1701,9 +1701,9 @@ def raise_for_status(response: httpx.Response) -> None:
with mock.patch("httpx.Response.raise_for_status", raise_for_status):
with pytest.raises(APIStatusError):
await self.client.post(
"/model/anthropic.claude-v2/invoke",
"/model/anthropic.claude-v2:1/invoke",
body=dict(
model="anthropic.claude-v2",
model="anthropic.claude-v2:1",
max_tokens_to_sample=300,
prompt="\n\nHuman:Where can I get a good coffee in my neighbourhood?\n\nAssistant:",
),
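One detail worth noticing in these client tests: on Bedrock the model ID appears in the invoke URL as well as the request body, so the ID bump shows up twice per request. A small sketch of that path construction (the helper is illustrative, not SDK code):

```python
def invoke_path(model: str) -> str:
    # Illustrative helper: Bedrock's runtime route is /model/{modelId}/invoke.
    return f"/model/{model}/invoke"

assert invoke_path("anthropic.claude-v2:1") == "/model/anthropic.claude-v2:1/invoke"
```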
