Skip to content

Commit c16a334

Browse files
committed
Updated
1 parent 246347a commit c16a334

File tree

4 files changed

+109
-0
lines changed

4 files changed

+109
-0
lines changed

AWS_BedRock/Image.py

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
"""Generate an image with the Stability Diffusion XL model on Amazon Bedrock.

Requires AWS credentials with bedrock:InvokeModel permission; region and
credentials come from the default boto3 configuration.
"""
import boto3
import json

# Create a Boto3 client for the Bedrock Runtime service.
client = boto3.client('bedrock-runtime')

# Request parameters: model ID, content-negotiation headers, and the
# model-specific request body (prompt plus sampling controls).
# NOTE(review): confirm "stability.stable-diffusion-xl-v0" is still an
# available model ID in the target region.
input_data = {
    "modelId": "stability.stable-diffusion-xl-v0",
    "contentType": "application/json",
    "accept": "application/json",
    "body": {"text_prompts":[{"text":"Sri lanka tea plantation."}],"cfg_scale":10,"seed":0,"steps":50}
}

# Invoke the model for inference.
# Fix: forward the declared "accept" header and reuse the declared
# contentType -- the original built both in input_data but never passed
# "accept" and hard-coded the content type a second time.
response = client.invoke_model(
    contentType=input_data['contentType'],
    accept=input_data['accept'],
    body=json.dumps(input_data['body']).encode('utf-8'),
    modelId=input_data['modelId'],
)

# The response body is a streaming object: read it once and decode.
inference_result = response['body'].read().decode('utf-8')

print(inference_result)

AWS_BedRock/chat.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
"""Invoke the AI21 Jurassic-2 Ultra text model on Amazon Bedrock and print the raw response.

Requires AWS credentials with bedrock:InvokeModel permission; region and
credentials come from the default boto3 configuration.
"""
import boto3
import json
import base64

client = boto3.client('bedrock-runtime')

# Request parameters for AI21 Jurassic-2 Ultra.
# Fixes vs. the original:
#   * topP must lie in [0, 1]; the previous value 250 was out of range.
#     1 means no nucleus cut-off, preserving the apparent intent.
#   * The AI21 J2 request schema uses camelCase "stopSequences",
#     not "stop_sequences".
# A dead, string-quoted j2-mid example that preceded this dict was removed.
input_data = {
    "modelId": "ai21.j2-ultra-v1",
    "contentType": "application/json",
    "accept": "*/*",
    "body": {
        "prompt": "this is where you place your input text",
        "maxTokens": 200,
        "temperature": 0,
        "topP": 1,
        "stopSequences": [],
        "countPenalty": {"scale": 0},
        "presencePenalty": {"scale": 0},
        "frequencyPenalty": {"scale": 0},
    },
}

# Invoke the model for inference.
# Fix: forward the declared "accept" header (the original defined it in
# input_data but never passed it to the call).
response = client.invoke_model(
    contentType=input_data['contentType'],
    accept=input_data['accept'],
    body=json.dumps(input_data["body"]).encode('utf-8'),
    modelId=input_data['modelId'],
)

# The body is a streaming object: read once, decode, parse the JSON payload.
Data = json.loads(response['body'].read().decode('utf-8'))
print(Data)
# Retrieve the inference response:
# AI21 J2 models return text at Data['completions'][0]['data']['text'];
# Cohere-style models use Data['generations'][0]['text'].
#print(Data['completions'][0]['data']['text'])
#print(Data['generations'][0]['text'])
32+

AWS_BedRock/playAI.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
"""Send a text prompt to the AI21 Jurassic-2 Mid model on Amazon Bedrock and print the parsed reply."""
import boto3
import json
import base64

client = boto3.client('bedrock-runtime')

# Invocation parameters: the body is a pre-serialized JSON document
# matching the AI21 Jurassic-2 completion request schema.
input_data = {
    "modelId": "ai21.j2-mid-v1",
    "contentType": "application/json",
    "accept": "*/*",
    "body": "{\"prompt\":\"how to cook tomato rice\",\"maxTokens\":200,\"temperature\":0.7,\"topP\":1,\"stopSequences\":[],\"countPenalty\":{\"scale\":0},\"presencePenalty\":{\"scale\":0},\"frequencyPenalty\":{\"scale\":0}}"
}

# Run the inference request against Bedrock.
response = client.invoke_model(
    contentType='application/json',
    body=input_data['body'],
    modelId=input_data['modelId'],
)

# Decode the streaming response body, then parse the JSON payload.
raw_payload = response['body'].read().decode('utf-8')
Data = json.loads(raw_payload)
print(Data)
# Retrieve the inference response
#print(Data['completions'][0]['data']['text'])
#print(Data['generations'][0]['text'])
24+

AWS_BedRock/text.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
"""Invoke the AI21 Jurassic-2 Mid model on Amazon Bedrock and print the generated text.

Requires AWS credentials with bedrock:InvokeModel permission; region and
credentials come from the default boto3 configuration.
"""
import boto3
import json
import base64

client = boto3.client('bedrock-runtime')

# Request parameters: the body is a pre-serialized JSON document for the
# AI21 Jurassic-2 completion API.
# A dead, string-quoted Cohere example that previously followed this dict
# has been removed.
input_data = {
    "modelId": "ai21.j2-mid-v1",
    "contentType": "application/json",
    "accept": "*/*",
    "body": "{\"prompt\":\"how to cook tomato rice\",\"maxTokens\":200,\"temperature\":0.7,\"topP\":1,\"stopSequences\":[],\"countPenalty\":{\"scale\":0},\"presencePenalty\":{\"scale\":0},\"frequencyPenalty\":{\"scale\":0}}"
}

# Invoke the model for inference.
# Fix: forward the declared "accept" header and reuse the declared
# contentType -- the original defined both in input_data but never
# passed "accept" and hard-coded the content type a second time.
response = client.invoke_model(
    contentType=input_data['contentType'],
    accept=input_data['accept'],
    body=input_data['body'],
    modelId=input_data['modelId'],
)

# Read the streaming response body once, decode, and parse the JSON payload.
Data = json.loads(response['body'].read().decode('utf-8'))

print(Data)
# Retrieve the inference response: J2 models return the completion here.
print(Data['completions'][0]['data']['text'])
# Cohere-style models would use Data['generations'][0]['text'] instead.
#print(Data['generations'][0]['text'])
32+

0 commit comments

Comments
 (0)