appearance tuning and cleanup

Sean Robinson 2025-06-02 15:07:34 -07:00
parent 57cd69ddce
commit 043a0c2165
4 changed files with 97 additions and 521 deletions

View File

@@ -2,4 +2,4 @@
export const MAX_TOKENS = 8192;
// limits the number of model responses that can be returned in a single request
export const MAX_RESPONSE_SEGMENTS = 2;
export const MAX_RESPONSE_SEGMENTS = 4;
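// Rough arithmetic for scale: with MAX_TOKENS = 8192, four segments allow roughly
// 4 × 8192 ≈ 32k streamed output tokens per request, up from ~16k with the previous
// value of 2 (the exact ceiling depends on how stream switches are counted).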

View File

@@ -1041,7 +1041,7 @@ As a reminder, all API endpoints are at https://staging.impromptu-labs.com .
- 'filter': Filter a list based on conditions
`;
export const INJECTED_PROMPT_1 = stripIndents`[INJECTED_PROMPT_1]
export const INJECTED_PROMPT_2 = stripIndents`[INJECTED_PROMPT_1]
Please review this API spec and be absolutely sure that you are calling those functions with the appropriate data formats, for example ensuring that you are sending object_name values, encapsulating input correctly in json, and using the exact function endpoints as they were defined.
As a reminder, all API endpoints are at https://staging.impromptu-labs.com .
@@ -1084,6 +1084,7 @@ And remember the actual API functions you have access to, and what they expect:
Ensure that the necessary input and output controls are present to allow the user to run this code, sending in what they need to at the time.
Please also echo the exact API calls to the screen for debugging as they happen.
Also, please add a green button to show the results, and a red button that will delete the objects produced by the code.
Remember to re-install and run npm run dev (using <boltAction type="shell"> ) after any changes.
`;
export const INJECTED_PROMPT_1_OLD = stripIndents`[INJECTED_PROMPT_1]
@@ -1141,7 +1142,7 @@ Deletes an object and all its data.
// • Interface - A button for each API-calling step, with an indicator that shows when each step is finished and the result.`;
export const INJECTED_PROMPT_2 = stripIndents`[INJECTED_PROMPT_2] Change the style of the app using the set of instructions below that are most relevant to the user task:
export const INJECTED_PROMPT_1 = stripIndents`[INJECTED_PROMPT_2] Change the style of the app using the set of instructions below that are most relevant to the user task:
(For screens where users upload documents, extract structured data, and view outputs):
Generate a three-step Upload & Extract flow for seed-to-Series-B small-business brands.
@@ -1168,5 +1169,5 @@ Design a three-column layout on desktop, single-column on mobile, for Upload + P
(General - for all workflows)
Do not use MUI icons, they break in this environment.
Please ensure that all text and windows have good contrast against their background.
Remember to re-install and run npm run dev after any changes.
Remember to re-install and run npm run dev (using <boltAction type="shell"> ) after any changes.
`;

View File

@@ -7,17 +7,86 @@ import { streamText as _streamText, convertToCoreMessages } from 'ai';
import { getAPIKey } from '~/lib/.server/llm/api-key';
import { getAnthropicModel } from '~/lib/.server/llm/model';
const estimateTokens = (text: string): number => {
// Rough estimation: ~4 characters per token for English text
return Math.ceil((text || '').length / 4);
};
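// Example (illustrative): a 40,000-character conversation estimates to
// Math.ceil(40000 / 4) = 10,000 tokens; the heuristic only needs to be accurate
// enough to decide when to start dropping old messages.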
const manageContextWindow = (messages: Messages, maxTokens: number = 150000): Messages => {
// Calculate total tokens in current conversation
let totalTokens = messages.reduce((sum, msg) => {
return sum + estimateTokens(msg.content || '');
}, 0);
console.log(`Total tokens before management: ${totalTokens}`);
// If we're under the limit, return messages as-is
if (totalTokens <= maxTokens) {
return messages;
}
// Create a copy to avoid mutating the original
const managedMessages = [...messages];
// Always keep the first message (system context) and last few messages
const keepRecentCount = 6; // Keep last 6 messages for context
// Remove messages from the middle until we're under the token limit
while (totalTokens > maxTokens && managedMessages.length > keepRecentCount + 1) {
// Find the oldest non-system message to remove
let removeIndex = 1;
// Skip any critical messages at the beginning
while (removeIndex < managedMessages.length - keepRecentCount) {
const msg = managedMessages[removeIndex];
// Don't remove injected prompts or transition markers
if (msg.role === 'user' && (
msg.content.includes('[INJECTED_PROMPT_1]') ||
msg.content.includes('[INJECTED_PROMPT_2]')
)) {
removeIndex++;
continue;
}
if (msg.role === 'assistant' && msg.content.includes('[final]')) {
removeIndex++;
continue;
}
break;
}
if (removeIndex < managedMessages.length - keepRecentCount) {
const removedMessage = managedMessages.splice(removeIndex, 1)[0];
totalTokens -= estimateTokens(removedMessage.content || '');
console.log(`Removed message, tokens now: ${totalTokens}`);
} else {
break; // Safety break if we can't find anything to remove
}
}
console.log(`Context managed: ${messages.length - managedMessages.length} messages removed`);
console.log(`Final managed messages count: ${managedMessages.length}, tokens: ${totalTokens}`);
return managedMessages;
};
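// Illustrative walk-through: when the estimate exceeds maxTokens, the loop drops
// messages starting at index 1 (the oldest after the initial context message),
// skipping any [INJECTED_PROMPT_*] user messages and [final] assistant messages,
// and stops once the estimate fits or only the first message plus the last
// keepRecentCount messages remain.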
export async function action(args: ActionFunctionArgs) {
return chatAction(args);
}
async function chatAction({ context, request }: ActionFunctionArgs) {
const { messages } = await request.json<{ messages: Messages }>();
// NEW: everything below now uses managedMessages in place of the raw messages array
const managedMessages = manageContextWindow(messages, 180000);
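// Note: the 180,000 passed here overrides manageContextWindow's 150,000 default;
// both are rough character-based estimates, not exact tokenizer counts.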
const stream = new SwitchableStream();
try {
// Check if we've already transitioned to the original agent
const hasTransitioned = checkIfAlreadyTransitioned(messages);
const hasTransitioned = checkIfAlreadyTransitioned(managedMessages);
if (!hasTransitioned) {
// Use your agent first
@@ -33,7 +102,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
console.log('Transition detected! Immediately injecting first prompt...');
// Add the assistant's response to messages
const updatedMessages: Messages = [...messages, { role: 'assistant' as const, content }];
const updatedMessages: Messages = [...managedMessages, { role: 'assistant' as const, content }];
// Inject the first prompt immediately
const injectedMessages = injectSinglePrompt(updatedMessages, 1);
@@ -98,14 +167,14 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
}
const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
messages.push({ role: 'assistant' as const, content });
messages.push({ role: 'user' as const, content: CONTINUE_PROMPT });
const result = await streamTextWithYourAgent(messages, context.cloudflare.env, yourAgentOptions);
managedMessages.push({ role: 'assistant' as const, content });
managedMessages.push({ role: 'user' as const, content: CONTINUE_PROMPT });
const result = await streamTextWithYourAgent(managedMessages, context.cloudflare.env, yourAgentOptions);
return stream.switchSource(result.toAIStream());
},
};
const result = await streamTextWithYourAgent(messages, context.cloudflare.env, yourAgentOptions);
const result = await streamTextWithYourAgent(managedMessages, context.cloudflare.env, yourAgentOptions);
stream.switchSource(result.toAIStream());
} else {
@@ -122,14 +191,14 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
}
const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
messages.push({ role: 'assistant' as const, content });
messages.push({ role: 'user' as const, content: CONTINUE_PROMPT });
const result = await streamText(messages, context.cloudflare.env, options);
managedMessages.push({ role: 'assistant' as const, content });
managedMessages.push({ role: 'user' as const, content: CONTINUE_PROMPT });
const result = await streamText(managedMessages, context.cloudflare.env, options);
return stream.switchSource(result.toAIStream());
},
};
const result = await streamText(messages, context.cloudflare.env, options);
const result = await streamText(managedMessages, context.cloudflare.env, options);
stream.switchSource(result.toAIStream());
}
@@ -213,509 +282,3 @@ function injectSinglePrompt(messages: Messages, promptNumber: 1 | 2): Messages {
return injectedMessages;
}
//////////////////////////
// async function chatAction({ context, request }: ActionFunctionArgs) {
// const { messages } = await request.json<{ messages: Messages }>();
// const stream = new SwitchableStream();
// try {
// // Check if we've already transitioned to the original agent
// const hasTransitioned = checkIfAlreadyTransitioned(messages);
// if (!hasTransitioned) {
// // Use your agent first
// console.log('Using your agent...');
// // Create options with proper stream closing
// const yourAgentOptions: StreamingOptions = {
// onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
// console.log('Your agent finished with reason:', finishReason);
// console.log('Response content:', content.substring(0, 100) + '...');
// // Check if we should transition to original agent
// if (checkIfShouldTransition(content)) {
// console.log('Transition detected - will switch on next message');
// }
// // Always close the stream when your agent finishes
// // (unless we need continuation due to length)
// if (finishReason !== 'length') {
// console.log('Closing stream - your agent finished');
// return stream.close();
// }
// // Handle continuation for length
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// messages.push({ role: 'assistant', content });
// messages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamTextWithYourAgent(messages, context.cloudflare.env, yourAgentOptions);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamTextWithYourAgent(messages, context.cloudflare.env, yourAgentOptions);
// stream.switchSource(result.toAIStream());
// } else {
// // We've transitioned - check if we need to inject prompts
// const injectionStatus = checkIfNeedsPromptInjection(messages);
// if (injectionStatus.needsInjection) {
// console.log(`Injecting prompt ${injectionStatus.whichPrompt} before using original agent...`);
// // Inject the single prompt
// const injectedMessages = injectSinglePrompt(messages, injectionStatus.whichPrompt!);
// // Run through original agent with injected prompt
// const options: StreamingOptions = {
// toolChoice: 'none',
// onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
// if (finishReason !== 'length') {
// return stream.close();
// }
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// injectedMessages.push({ role: 'assistant', content });
// injectedMessages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamText(injectedMessages, context.cloudflare.env, options);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamText(injectedMessages, context.cloudflare.env, options);
// stream.switchSource(result.toAIStream());
// } else {
// // Normal original agent flow
// console.log('Using original agent...');
// const options: StreamingOptions = {
// toolChoice: 'none',
// onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
// if (finishReason !== 'length') {
// return stream.close();
// }
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// messages.push({ role: 'assistant', content });
// messages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamText(messages, context.cloudflare.env, options);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamText(messages, context.cloudflare.env, options);
// stream.switchSource(result.toAIStream());
// }
// }
// return new Response(stream.readable, {
// status: 200,
// headers: {
// contentType: 'text/plain; charset=utf-8',
// },
// });
// } catch (error) {
// console.log(error);
// throw new Response(null, {
// status: 500,
// statusText: 'Internal Server Error',
// });
// }
// }
// // Updated helper function
// function streamTextWithYourAgent(messages: Messages, env: Env, options?: StreamingOptions) {
// return _streamText({
// model: getAnthropicModel(getAPIKey(env)),
// system: getYourAgentSystemPrompt(),
// maxTokens: MAX_TOKENS,
// headers: {
// 'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
// },
// messages: convertToCoreMessages(messages),
// ...options, // This will include the onFinish callback we pass in
// });
// }
// function getYourAgentSystemPrompt(): string {
// // Return your custom system prompt
// // Include instruction to end with [final] when ready to transition
// return API_CHATBOT_PROMPT;;
// }
// function checkIfAlreadyTransitioned(messages: Messages): boolean {
// // Check if any assistant message contains [final]
// return messages.some(msg =>
// msg.role === 'assistant' && msg.content.includes('[final]')
// );
// }
// function checkIfShouldTransition(responseText: string): boolean {
// return responseText.includes('[final]');
// }
// function checkIfNeedsPromptInjection(messages: Messages): { needsInjection: boolean; whichPrompt: 1 | 2 | null } {
// const transitionIndex = messages.findIndex(msg =>
// msg.role === 'assistant' && msg.content.includes('[final]')
// );
// if (transitionIndex === -1) {
// console.log('No transition found, no injection needed');
// return { needsInjection: false, whichPrompt: null };
// }
// console.log('Transition found at index:', transitionIndex);
// // Check what we've already injected after transition
// const messagesAfterTransition = messages.slice(transitionIndex + 1);
// console.log('Messages after transition:', messagesAfterTransition.length);
// const prompt1Messages = messagesAfterTransition.filter(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_1]')
// );
// const prompt2Messages = messagesAfterTransition.filter(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_2]')
// );
// console.log('Found prompt 1 messages:', prompt1Messages.length);
// console.log('Found prompt 2 messages:', prompt2Messages.length);
// if (prompt1Messages.length === 0) {
// console.log('Need to inject prompt 1');
// return { needsInjection: true, whichPrompt: 1 };
// } else if (prompt1Messages.length > 0 && prompt2Messages.length === 0) {
// // Check if we got a response to prompt 1
// const prompt1Index = messagesAfterTransition.findIndex(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_1]')
// );
// const hasResponseToPrompt1 = messagesAfterTransition.slice(prompt1Index + 1).some(msg =>
// msg.role === 'assistant'
// );
// console.log('Has response to prompt 1:', hasResponseToPrompt1);
// if (hasResponseToPrompt1) {
// console.log('Need to inject prompt 2');
// return { needsInjection: true, whichPrompt: 2 };
// } else {
// console.log('Waiting for response to prompt 1');
// return { needsInjection: false, whichPrompt: null };
// }
// }
// console.log('All prompts already injected');
// return { needsInjection: false, whichPrompt: null };
// }
// function injectSinglePrompt(messages: Messages, promptNumber: 1 | 2): Messages {
// const injectedMessages = [...messages];
// console.log('injecting a single prompt into messages:', messages);
// if (promptNumber === 1) {
// injectedMessages.push({
// role: 'user',
// content: '[INJECTED_PROMPT_1] Please review the API spec and be absolutely sure that you are calling those functions with the appropriate data formats, for example ensuring that you are sending object_name values, encapsulating input correctly in json, and using the exact function endpoints as they were defined.'
// });
// } else {
// injectedMessages.push({
// role: 'user',
// content: `[INJECTED_PROMPT_2] Rewrite the code using the Modernize Next-js Free design system:
// • Framework - Next.js 14 App Router + TypeScript
// • UI library - Material UI v5; style only with the sx prop and MUI components
// • Theme palette - primary #5D87FF, success #13DEB9, danger #FA896B, warning #FFAE1F
// • Layout - persistent 260 px left drawer + top AppBar + scrollable main; keep shadow-1 cards and 12-col responsive grid
// • Typography - Public Sans, 14 px base, 20 px h6, 32 px h4
// • File structure - components in /package/src/components/, pages in /package/src/app/ with PascalCase files
// • Write all components as arrow functions, export default, and type props explicitly`
// });
// }
// return injectedMessages;
// }
// async function chatAction({ context, request }: ActionFunctionArgs) {
// const { messages } = await request.json<{ messages: Messages }>();
// const stream = new SwitchableStream();
// try {
// // Check if we've already transitioned to the original agent
// const hasTransitioned = checkIfAlreadyTransitioned(messages);
// if (!hasTransitioned) {
// // Use your agent first
// console.log('Using your agent...');
// const result = await streamTextWithYourAgent(messages, context.cloudflare.env);
// // Collect the streamed response to check for [final] token
// let fullResponse = '';
// const responseStream = result.toAIStream();
// // We need to capture the response as it streams
// // This is a bit tricky with streaming - we might need to modify this approach
// const transformStream = new TransformStream({
// transform(chunk, controller) {
// const text = new TextDecoder().decode(chunk);
// fullResponse += text;
// controller.enqueue(chunk);
// },
// flush() {
// // After streaming is complete, check if we should transition
// if (checkIfShouldTransition(fullResponse)) {
// // We need to handle transition after this stream completes
// // This might require a different approach - see note below
// }
// }
// });
// // For now, let's return the stream and handle transition on next message
// stream.switchSource(responseStream);
// } else {
// // We've transitioned - check if we need to inject prompts
// const injectionStatus = checkIfNeedsPromptInjection(messages);
// if (injectionStatus.needsInjection) {
// console.log(`Injecting prompt ${injectionStatus.whichPrompt} before using original agent...`);
// // Inject the single prompt
// const injectedMessages = injectSinglePrompt(messages, injectionStatus.whichPrompt!);
// // Run through original agent with injected prompt
// const options: StreamingOptions = {
// toolChoice: 'none',
// onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
// if (finishReason !== 'length') {
// return stream.close();
// }
// // Handle continuation logic (same as original)
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// injectedMessages.push({ role: 'assistant', content });
// injectedMessages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamText(injectedMessages, context.cloudflare.env, options);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamText(injectedMessages, context.cloudflare.env, options);
// stream.switchSource(result.toAIStream());
// } else {
// // Normal original agent flow
// console.log('Using original agent...');
// const options: StreamingOptions = {
// toolChoice: 'none',
// onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
// if (finishReason !== 'length') {
// return stream.close();
// }
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// messages.push({ role: 'assistant', content });
// messages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamText(messages, context.cloudflare.env, options);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamText(messages, context.cloudflare.env, options);
// stream.switchSource(result.toAIStream());
// }
// }
// return new Response(stream.readable, {
// status: 200,
// headers: {
// contentType: 'text/plain; charset=utf-8',
// },
// });
// } catch (error) {
// console.log(error);
// throw new Response(null, {
// status: 500,
// statusText: 'Internal Server Error',
// });
// }
// }
// // Helper functions
// function streamTextWithYourAgent(messages: Messages, env: Env, options?: StreamingOptions) {
// // For now, copy of the original streamText - you can modify later
// return _streamText({
// model: getAnthropicModel(getAPIKey(env)),
// system: getYourAgentSystemPrompt(), // You'll need to create this
// maxTokens: MAX_TOKENS,
// headers: {
// 'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
// },
// messages: convertToCoreMessages(messages),
// ...options,
// });
// }
// function getYourAgentSystemPrompt(): string {
// // Return your custom system prompt
// // Include instruction to end with [final] when ready to transition
// return API_CHATBOT_PROMPT;
// }
// function checkIfAlreadyTransitioned(messages: Messages): boolean {
// // Check if any assistant message contains [final]
// return messages.some(msg =>
// msg.role === 'assistant' && msg.content.includes('[final]')
// );
// }
// function checkIfShouldTransition(responseText: string): boolean {
// return responseText.includes('[final]');
// }
// function checkIfNeedsPromptInjection(messages: Messages): { needsInjection: boolean; whichPrompt: 1 | 2 | null } {
// const transitionIndex = messages.findIndex(msg =>
// msg.role === 'assistant' && msg.content.includes('[final]')
// );
// if (transitionIndex === -1) return { needsInjection: false, whichPrompt: null };
// // Check what we've already injected after transition
// const messagesAfterTransition = messages.slice(transitionIndex + 1);
// const hasPrompt1 = messagesAfterTransition.some(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_1]')
// );
// const hasPrompt2 = messagesAfterTransition.some(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_2]')
// );
// if (!hasPrompt1) {
// return { needsInjection: true, whichPrompt: 1 };
// } else if (hasPrompt1 && !hasPrompt2) {
// // Check if we got a response to prompt 1
// const prompt1Index = messagesAfterTransition.findIndex(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_1]')
// );
// const hasResponseToPrompt1 = messagesAfterTransition.slice(prompt1Index + 1).some(msg =>
// msg.role === 'assistant'
// );
// if (hasResponseToPrompt1) {
// return { needsInjection: true, whichPrompt: 2 };
// }
// }
// return { needsInjection: false, whichPrompt: null };
// }
// function injectSinglePrompt(messages: Messages, promptNumber: 1 | 2): Messages {
// const injectedMessages = [...messages];
// if (promptNumber === 1) {
// injectedMessages.push({
// role: 'user',
// content: '[INJECTED_PROMPT_1] Please review the API spec and be absolutely sure that you are calling those functions with the appropriate data formats, for example ensuring that you are sending object_name values, encapsulating input correctly in json, and using the exact function endpoints as they were defined.'
// });
// } else {
// injectedMessages.push({
// role: 'user',
// content: `[INJECTED_PROMPT_2] Rewrite the code using the Modernize Next-js Free design system:
// • Framework - Next.js 14 App Router + TypeScript
// • UI library - Material UI v5; style only with the sx prop and MUI components
// • Theme palette - primary #5D87FF, success #13DEB9, danger #FA896B, warning #FFAE1F
// • Layout - persistent 260 px left drawer + top AppBar + scrollable main; keep shadow-1 cards and 12-col responsive grid
// • Typography - Public Sans, 14 px base, 20 px h6, 32 px h4
// • File structure - components in /package/src/components/, pages in /package/src/app/ with PascalCase files
// • Write all components as arrow functions, export default, and type props explicitly`
// });
// }
// return injectedMessages;
// }
// async function chatAction({ context, request }: ActionFunctionArgs) {
// const { messages } = await request.json<{ messages: Messages }>();
// const stream = new SwitchableStream();
// try {
// const options: StreamingOptions = {
// toolChoice: 'none',
// onFinish: async ({ text: content, finishReason }) => {
// if (finishReason !== 'length') {
// return stream.close();
// }
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// messages.push({ role: 'assistant', content });
// messages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamText(messages, context.cloudflare.env, options);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamText(messages, context.cloudflare.env, options);
// stream.switchSource(result.toAIStream());
// return new Response(stream.readable, {
// status: 200,
// headers: {
// contentType: 'text/plain; charset=utf-8',
// },
// });
// } catch (error) {
// console.log(error);
// throw new Response(null, {
// status: 500,
// statusText: 'Internal Server Error',
// });
// }
// }

View File

@@ -1,4 +1,16 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16">
<!-- <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 16" width="16" height="16">
<rect width="16" height="16" rx="2" fill="#1389fd" />
<path d="M7.398 9.091h-3.58L10.364 2 8.602 6.909h3.58L5.636 14l1.762-4.909Z" fill="#fff" />
</svg> -->
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" width="64" height="64">
<!-- Yellow background -->
<rect width="64" height="64" fill="#FFD700"/>
<!-- Black circle -->
<circle cx="32" cy="32" r="20" fill="black"/>
<!-- Yellow "E" -->
<text x="32" y="42" text-anchor="middle" font-size="28" font-family="Arial, sans-serif" fill="#FFD700" font-weight="bold">E</text>
</svg>

Before: Size 241 B

After: Size 653 B