Why is Amazon Bedrock Considered a Legacy Provider? #1541
-
On the Provider Docs site, Amazon Bedrock is listed as a legacy provider. Why is that? Are there any plans to make it a first-class citizen so that it moves under the AI SDK Providers heading? I have a requirement to use Amazon Bedrock. I'd like to use the AI SDK, but I want to be aware of any limitations when using it with Amazon Bedrock. CC: @lgrammel and @MaxLeiter based on #750
-
You can use the existing Bedrock provider. It uses our legacy provider API approach, which is not compatible with the new AI SDK Core, but it works as-is with useChat and useCompletion.
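Roughly like this with the legacy stream helpers (an untested sketch; the region, model ID, and Anthropic-style prompt format are placeholders you would adjust):

```ts
// app/api/completion/route.ts — sketch of the legacy approach; region and
// model ID are placeholders.
import {
  BedrockRuntimeClient,
  InvokeModelWithResponseStreamCommand,
} from '@aws-sdk/client-bedrock-runtime';
import { AWSBedrockAnthropicStream, StreamingTextResponse } from 'ai';

const client = new BedrockRuntimeClient({ region: 'us-east-1' });

export async function POST(req: Request) {
  const { prompt } = await req.json();

  const response = await client.send(
    new InvokeModelWithResponseStreamCommand({
      modelId: 'anthropic.claude-v2',
      contentType: 'application/json',
      accept: 'application/json',
      body: JSON.stringify({
        prompt: `\n\nHuman: ${prompt}\n\nAssistant:`,
        max_tokens_to_sample: 300,
      }),
    }),
  );

  // AWSBedrockAnthropicStream adapts the Bedrock event stream into the
  // format that useChat and useCompletion expect.
  const stream = AWSBedrockAnthropicStream(response);
  return new StreamingTextResponse(stream);
}
```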
-
This is my peasant-level initial attempt. I haven't tested it yet, so perhaps someone else can take over from my lazy ass? First, a thin provider wrapper:
```ts
import type { AiCoreProvider } from './ai-core.interface';
import {
  BedrockChatLanguageModel,
  bedrock,
} from '@somethingsomethingsomething/bedrock';

export class AwsBedrockProvider implements AiCoreProvider {
  readonly name = 'AWS Bedrock';

  // Built in the constructor instead of a field initializer: an initializer
  // that reads `this.options` runs before the parameter property is assigned
  // under ES2022 class-field semantics.
  private readonly bedrock: (modelName: string) => BedrockChatLanguageModel;

  constructor(options: {
    region: string;
    accessKeyId: string;
    secretAccessKey: string;
  }) {
    this.bedrock = bedrock(options);
  }

  createModel(modelName: string): BedrockChatLanguageModel {
    return this.bedrock(modelName);
  }
}
```
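Using it would look something like this (hypothetical; the env var names and model ID are made up):

```ts
// Hypothetical usage of the wrapper above; env var names and the model ID
// are placeholders, not part of the original snippet.
const provider = new AwsBedrockProvider({
  region: 'us-east-1',
  accessKeyId: process.env.AWS_ACCESS_KEY_ID!,
  secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
});

const model = provider.createModel('anthropic.claude-3-sonnet-20240229-v1:0');
```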
The provider implementation itself:

```ts
import type { LanguageModelV1 } from '@ai-sdk/provider';
import {
  BedrockRuntimeClient,
  ConverseCommand,
  ConverseStreamCommand,
  type ConverseStreamCommandOutput,
  type ConverseStreamOutput,
  type Message,
  type StopReason,
} from '@aws-sdk/client-bedrock-runtime';
import type {
  DoGenerateInput,
  DoGenerateResult,
  DoStreamInput,
  DoStreamResult,
  DoStreamPart,
} from './types';

// Factory that binds a Bedrock runtime client to a model constructor.
export function bedrock(options: {
  region: string;
  accessKeyId: string;
  secretAccessKey: string;
}) {
  const client = new BedrockRuntimeClient({
    region: options.region,
    credentials: {
      accessKeyId: options.accessKeyId,
      secretAccessKey: options.secretAccessKey,
    },
  });

  return (modelName: string) =>
    new BedrockChatLanguageModel(modelName, client);
}
/**
 * A Bedrock language model.
 */
export class BedrockChatLanguageModel implements LanguageModelV1 {
  readonly specificationVersion = 'v1';
  readonly defaultObjectGenerationMode = 'json';

  constructor(
    public readonly modelId: string,
    private readonly client: BedrockRuntimeClient,
  ) {}

  get provider(): string {
    return 'AWS Bedrock';
  }
  async doGenerate({ prompt, ...options }: DoGenerateInput) {
    const messages = filterBedrockMessages(prompt);

    const command = new ConverseCommand({
      modelId: this.modelId,
      messages,
    });

    const response = await this.client.send(command);
    const content = response.output?.message?.content?.[0];

    const output: DoGenerateResult = {
      text: content?.text,
      finishReason: mapStopReason(response.stopReason),
      usage: {
        // Falls back to NaN when the API omits usage counts.
        promptTokens: response.usage?.inputTokens ?? NaN,
        completionTokens: response.usage?.outputTokens ?? NaN,
      },
      rawCall: {
        rawPrompt: prompt,
        rawSettings: options,
      },
      rawResponse: {},
      /**
       * Is this part even correct? I'm not sure. Converse reports tool use
       * via `toolUse` content blocks, so this probably needs rework.
       */
      toolCalls: response.additionalModelResponseFields?.['tool_calls'],
    };

    return output;
  }
  /**
   * TODO: Implement this properly
   */
  async doStream({ prompt, ...options }: DoStreamInput) {
    const messages = filterBedrockMessages(prompt);

    const command = new ConverseStreamCommand({
      modelId: this.modelId,
      messages,
    });

    const response = await this.client.send(command);
    const stream = createBedrockResponseStream(response);

    const output: DoStreamResult = {
      rawCall: {
        rawPrompt: prompt,
        rawSettings: options,
      },
      stream,
      rawResponse: {},
      warnings: [],
    };

    return output;
  }
}
function createBedrockResponseStream(input: ConverseStreamCommandOutput) {
  // Bedrock emits usage metadata *after* the messageStop event, so both are
  // buffered here and emitted as a single 'finish' part when the stream ends.
  let finishReason: DoGenerateResult['finishReason'] = 'unknown';
  let usage = { promptTokens: NaN, completionTokens: NaN };

  const stream = createResponseStream<DoStreamPart>(
    input,
    (chunk, controller) => {
      // All four exception variants carry an error payload; surface them
      // uniformly as 'error' parts.
      const error =
        chunk.throttlingException ??
        chunk.modelStreamErrorException ??
        chunk.internalServerException ??
        chunk.validationException;

      if (error) {
        controller.enqueue({ type: 'error', error });
        return;
      }

      if (chunk.$unknown) {
        controller.enqueue({
          type: 'error',
          error: new Error(`Unknown chunk type: ${chunk.$unknown[0]}`),
        });
        return;
      }

      if (chunk.metadata) {
        usage = {
          promptTokens: chunk.metadata.usage?.inputTokens ?? NaN,
          completionTokens: chunk.metadata.usage?.outputTokens ?? NaN,
        };
        return;
      }

      if (chunk.messageStop) {
        finishReason = mapStopReason(chunk.messageStop.stopReason);
        return;
      }

      if (chunk.contentBlockDelta) {
        controller.enqueue({
          type: 'text-delta',
          textDelta: chunk.contentBlockDelta.delta?.text ?? '',
        });
        return;
      }
    },
    controller => {
      controller.enqueue({ type: 'finish', finishReason, usage });
    },
  );

  return stream;
}
function createResponseStream<T>(
  input: ConverseStreamCommandOutput,
  map: (
    part: ConverseStreamOutput,
    controller: ReadableStreamDefaultController<T>,
  ) => void,
  flush?: (controller: ReadableStreamDefaultController<T>) => void,
) {
  const stream = new ReadableStream<T>({
    async start(controller) {
      try {
        // input.stream is optional in the SDK types.
        for await (const item of input.stream ?? []) {
          map(item, controller);
        }

        flush?.(controller);
        controller.close();
      } catch (error) {
        controller.error(error);
      }
    },
  });

  return stream;
}
/**
 * Bedrock doesn't support messages other than user and assistant.
 *
 * NOTE: this only filters by role. A complete implementation would also
 * convert LanguageModelV1 content parts into Bedrock ContentBlocks.
 */
function filterBedrockMessages(messages: unknown[]): Message[] {
  const output: Message[] = [];

  for (const message of messages) {
    if (isBedrockMessage(message)) {
      output.push(message);
    }
  }

  return output;
}

/**
 * Bedrock messages only have a role of 'user' or 'assistant'.
 */
function isBedrockMessage(message: unknown): message is Message {
  if (typeof message !== 'object' || message === null) {
    return false;
  }

  if (!('role' in message)) {
    return false;
  }

  return message.role === 'user' || message.role === 'assistant';
}
/**
 * Vercel AI finish reasons are not the same as Bedrock stop reasons:
 * 'end_turn' and 'stop_sequence' both mean a normal stop, and
 * 'max_tokens' maps to 'length'. Anything unrecognized is 'unknown'.
 */
function mapStopReason(
  reason: StopReason | undefined,
): DoGenerateResult['finishReason'] {
  switch (reason) {
    case 'end_turn':
    case 'stop_sequence':
      return 'stop';
    case 'max_tokens':
      return 'length';
    case 'content_filtered':
      return 'content-filter';
    case 'tool_use':
      return 'tool-calls';
    default:
      return 'unknown';
  }
}
```

And finally the `./types` helper file, which derives its types from `LanguageModelV1` so they stay in sync with the SDK:
```ts
import type { LanguageModelV1 } from '@ai-sdk/provider';

// Derive the doGenerate/doStream input and result types directly from the
// LanguageModelV1 interface.
type Result<T extends 'doGenerate' | 'doStream'> = Awaited<
  ReturnType<LanguageModelV1[T]>
>;

type Input<T extends 'doGenerate' | 'doStream'> = Parameters<
  LanguageModelV1[T]
>[0];

export type DoGenerateResult = Result<'doGenerate'>;
export type DoGenerateInput = Input<'doGenerate'>;
export type DoStreamResult = Result<'doStream'>;
export type DoStreamInput = Input<'doStream'>;

type LanguageModelV1FinishReason =
  | 'stop'
  | 'length'
  | 'content-filter'
  | 'tool-calls'
  | 'error'
  | 'other'
  | 'unknown';

type LanguageModelV1LogProbs = Array<{
  token: string;
  logprob: number;
  topLogprobs: Array<{
    token: string;
    logprob: number;
  }>;
}>;

type LanguageModelV1FunctionToolCall = {
  toolCallType: 'function';
  toolCallId: string;
  toolName: string;
  /**
   * Stringified JSON object with the tool call arguments. Must match the
   * parameters schema of the tool.
   */
  args: string;
};

export type DoStreamPart =
  | {
      type: 'text-delta';
      textDelta: string;
    }
  | ({
      type: 'tool-call';
    } & LanguageModelV1FunctionToolCall)
  | {
      type: 'tool-call-delta';
      toolCallType: 'function';
      toolCallId: string;
      toolName: string;
      argsTextDelta: string;
    }
  | {
      type: 'finish';
      finishReason: LanguageModelV1FinishReason;
      logprobs?: LanguageModelV1LogProbs;
      usage: {
        promptTokens: number;
        completionTokens: number;
      };
    }
  | {
      type: 'error';
      error: unknown;
    };
```
-
We now have a Bedrock provider for AI SDK 3.1: https://sdk.vercel.ai/providers/ai-sdk-providers/amazon-bedrock
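Basic usage, if you want a starting point (a minimal sketch; the model ID is just an example, and credentials come from the standard AWS environment variables):

```ts
import { bedrock } from '@ai-sdk/amazon-bedrock';
import { generateText } from 'ai';

// The default `bedrock` provider instance reads AWS credentials and region
// from the environment. The model ID here is an example.
const { text } = await generateText({
  model: bedrock('anthropic.claude-3-haiku-20240307-v1:0'),
  prompt: 'Write a haiku about event streams.',
});

console.log(text);
```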