bolt.new/app/routes/api.chat.ts
Maki d86eaa420e 🚀 [feat] Integrate Amazon Bedrock support
Add support for Amazon Bedrock models, including:

- Implement AWS credentials retrieval for Bedrock
- Add Bedrock model initialization and handling
- Include Claude 3 models (Opus, Sonnet, Haiku) for Bedrock
- Adjust token limits for Bedrock models
- Update chat action to support model selection
- Add @ai-sdk/amazon-bedrock dependency

Key changes:
- app/lib/.server/llm/api-key.ts: Add getAWSCredentials function
- app/lib/.server/llm/constants.ts: Define MAX_TOKENS_BEDROCK
- app/lib/.server/llm/model.ts: Implement getBedrockModel function (see the sketch after this commit summary)
- app/lib/.server/llm/stream-text.ts: Use Bedrock-specific token limit
- app/routes/api.chat.ts: Update to support model selection
- app/utils/constants.ts: Add Bedrock model options
- package.json: Add @ai-sdk/amazon-bedrock dependency
- pnpm-lock.yaml: Update with new dependencies
2024-10-17 20:46:17 +09:00
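
The helpers named in the commit summary (getAWSCredentials, getBedrockModel, MAX_TOKENS_BEDROCK) live in files not shown on this page. As a hedged sketch only, here is one plausible shape for them on top of @ai-sdk/amazon-bedrock; the env-variable names, the default region, the token limit value, and the model ID are assumptions, not code from the commit:

    // Hypothetical sketch -- the real api-key.ts / constants.ts / model.ts bodies are not shown here.
    import { createAmazonBedrock } from '@ai-sdk/amazon-bedrock';
    import type { LanguageModel } from 'ai';

    // Assumed value; the commit only says a Bedrock-specific limit was defined.
    export const MAX_TOKENS_BEDROCK = 4096;

    interface Env {
      AWS_REGION?: string; // assumed variable names; the fork may use different ones
      AWS_ACCESS_KEY_ID?: string;
      AWS_SECRET_ACCESS_KEY?: string;
    }

    // Roughly what getAWSCredentials in app/lib/.server/llm/api-key.ts could return.
    export function getAWSCredentials(env: Env) {
      return {
        region: env.AWS_REGION ?? 'us-east-1',
        accessKeyId: env.AWS_ACCESS_KEY_ID,
        secretAccessKey: env.AWS_SECRET_ACCESS_KEY,
      };
    }

    // Roughly what getBedrockModel in app/lib/.server/llm/model.ts could look like;
    // createAmazonBedrock is the real factory exported by @ai-sdk/amazon-bedrock.
    export function getBedrockModel(env: Env, modelId: string): LanguageModel {
      const bedrock = createAmazonBedrock(getAWSCredentials(env));
      return bedrock(modelId); // e.g. 'anthropic.claude-3-sonnet-20240229-v1:0'
    }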

// @ts-nocheck
// TS checks are disabled so the files shown in the video read more cleanly.
import { type ActionFunctionArgs } from '@remix-run/cloudflare';
import { MAX_RESPONSE_SEGMENTS, MAX_TOKENS } from '~/lib/.server/llm/constants';
import { CONTINUE_PROMPT } from '~/lib/.server/llm/prompts';
import { streamText, type Messages, type StreamingOptions } from '~/lib/.server/llm/stream-text';
import SwitchableStream from '~/lib/.server/llm/switchable-stream';

export async function action(args: ActionFunctionArgs) {
  return chatAction(args);
}

async function chatAction({ context, request }: ActionFunctionArgs) {
  // selectedModel is parsed from the request body (added with Bedrock support);
  // note that it is not forwarded to streamText in this revision of the file.
  const { messages, selectedModel } = await request.json<{ messages: Messages; selectedModel?: string }>();

  const stream = new SwitchableStream();

  try {
    const options: StreamingOptions = {
      toolChoice: 'none',
      onFinish: async ({ text: content, finishReason }) => {
        // Any finish reason other than a token-limit cutoff means the model completed on its own.
        if (finishReason !== 'length') {
          return stream.close();
        }

        if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
          throw new Error('Cannot continue message: Maximum segments reached');
        }

        const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;

        console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);

        // Append the truncated answer plus a continuation prompt, then swap the
        // underlying source of the response stream without breaking the HTTP response.
        messages.push({ role: 'assistant', content });
        messages.push({ role: 'user', content: CONTINUE_PROMPT });

        const result = await streamText(messages, context.cloudflare.env, options);

        return stream.switchSource(result.toAIStream());
      },
    };

    const result = await streamText(messages, context.cloudflare.env, options);

    stream.switchSource(result.toAIStream());

    return new Response(stream.readable, {
      status: 200,
      headers: {
        'Content-Type': 'text/plain; charset=utf-8',
      },
    });
  } catch (error) {
    console.log(error);

    throw new Response(null, {
      status: 500,
      statusText: 'Internal Server Error',
    });
  }
}
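
For reference, the action can be exercised from the client with a plain fetch and a streamed read; the payload shape matches the request.json<...>() call above. A minimal sketch, assuming the route is mounted at /api/chat and using an example Bedrock model ID:

    // Client-side sketch; the model ID is illustrative, not prescribed by this file.
    const response = await fetch('/api/chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        messages: [{ role: 'user', content: 'Hello!' }],
        selectedModel: 'anthropic.claude-3-haiku-20240307-v1:0',
      }),
    });

    // The response body is a plain text stream; read and print it chunk by chunk.
    const reader = response.body!.getReader();
    const decoder = new TextDecoder();

    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      console.log(decoder.decode(value, { stream: true }));
    }

Because SwitchableStream splices a fresh completion onto the same readable whenever a segment ends with finishReason === 'length', the client sees one uninterrupted text stream even when the server internally continues the message up to MAX_RESPONSE_SEGMENTS times.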