Use Nut API for developer chat messages (#47)

This commit is contained in:
Brian Hackett
2025-03-10 11:56:14 -07:00
committed by GitHub
parent a37fb7a491
commit 9ca3c9c977
20 changed files with 417 additions and 2076 deletions

View File

@@ -1,7 +1,6 @@
import { memo } from 'react';
import { Markdown } from './Markdown';
import type { JSONValue } from 'ai';
import type { ChatAnthropicInfo, AnthropicCall } from '~/lib/.server/llm/chat-anthropic';
import type { ContentBlockParam } from '@anthropic-ai/sdk/resources/messages/messages.mjs';
import { toast } from 'react-toastify';
@@ -16,7 +15,6 @@ export function getAnnotationsTokensUsage(annotations: JSONValue[] | undefined)
) || []) as { type: string; value: any }[];
const usage: {
chatInfo: ChatAnthropicInfo;
completionTokens: number;
promptTokens: number;
totalTokens: number;
@@ -25,82 +23,14 @@ export function getAnnotationsTokensUsage(annotations: JSONValue[] | undefined)
return usage;
}
// Flatten message content down to a single string: pass strings through,
// concatenate the text blocks of an array, and fall back to a sentinel for
// anything unrecognized (logged for diagnosis).
function flatMessageContent(content: string | ContentBlockParam[]): string {
  if (typeof content === "string") {
    return content;
  }
  if (Array.isArray(content)) {
    return content.reduce(
      (acc, block) => (block.type === "text" ? acc + block.text : acc),
      ""
    );
  }
  console.log("AnthropicUnknownContent", JSON.stringify(content, null, 2));
  return "AnthropicUnknownContent";
}
// Render a full transcript of the chat's Anthropic traffic (main call plus
// any restore calls, then accumulated infos) as a plain-text report.
function describeChatInfo(chatInfo: ChatAnthropicInfo) {
  const parts: string[] = [];

  const describeCall = (call: AnthropicCall) => {
    parts.push("************************************************\n");
    parts.push("AnthropicMessageSend\n");
    parts.push("Message system:\n");
    parts.push(call.systemPrompt);
    for (const message of call.messages) {
      parts.push(`Message ${message.role}:\n`);
      parts.push(flatMessageContent(message.content));
    }
    parts.push("Response:\n");
    parts.push(call.responseText);
    parts.push("\n");
    parts.push(`Tokens ${call.completionTokens + call.promptTokens}\n`);
    parts.push("************************************************\n");
  };

  describeCall(chatInfo.mainCall);
  chatInfo.restoreCalls.forEach(describeCall);
  parts.push(...chatInfo.infos);

  return parts.join("");
}
export const AssistantMessage = memo(({ content, annotations }: AssistantMessageProps) => {
const usage = getAnnotationsTokensUsage(annotations);
const onUsageClicked = () => {
if (!usage.chatInfo) {
toast.error("No chat info found");
return;
}
const text = describeChatInfo(usage.chatInfo);
// Create a blob with the text content
const blob = new Blob([text], { type: 'text/plain' });
const url = URL.createObjectURL(blob);
// Open the blob URL in a new window
window.open(url);
// Clean up the blob URL after a short delay
setTimeout(() => URL.revokeObjectURL(url), 100);
};
return (
<div className="overflow-hidden w-full">
{usage && (
<div
className="text-sm text-bolt-elements-textSecondary mb-2 cursor-pointer hover:underline"
onClick={onUsageClicked}
title="View call information"
className="text-sm text-bolt-elements-textSecondary mb-2"
>
Tokens: {usage.totalTokens} (prompt: {usage.promptTokens}, completion: {usage.completionTokens})
</div>

View File

@@ -4,11 +4,10 @@
*/
import { useStore } from '@nanostores/react';
import type { CreateMessage, Message } from 'ai';
import { useChat } from 'ai/react';
import { useAnimate } from 'framer-motion';
import { memo, useCallback, useEffect, useMemo, useRef, useState } from 'react';
import { cssTransition, toast, ToastContainer } from 'react-toastify';
import { useMessageParser, usePromptEnhancer, useShortcuts, useSnapScroll } from '~/lib/hooks';
import { useMessageParser, useShortcuts, useSnapScroll } from '~/lib/hooks';
import { description, useChatHistory } from '~/lib/persistence';
import { chatStore } from '~/lib/stores/chat';
import { workbenchStore } from '~/lib/stores/workbench';
@@ -22,7 +21,7 @@ import { useSettings } from '~/lib/hooks/useSettings';
import { useSearchParams } from '@remix-run/react';
import { createSampler } from '~/utils/sampler';
import { saveProjectContents } from './Messages.client';
import { getSimulationRecording, getSimulationEnhancedPrompt, simulationAddData, simulationRepositoryUpdated } from '~/lib/replay/SimulationPrompt';
import { getSimulationRecording, getSimulationEnhancedPrompt, simulationAddData, simulationRepositoryUpdated, shouldUseSimulation, sendDeveloperChatMessage, type ProtocolMessage } from '~/lib/replay/SimulationPrompt';
import { getIFrameSimulationData } from '~/lib/replay/Recording';
import { getCurrentIFrame } from '../workbench/Preview';
import { getCurrentMouseData } from '../workbench/PointSelector';
@@ -30,9 +29,9 @@ import { anthropicNumFreeUsesCookieName, anthropicApiKeyCookieName, MaxFreeUses
import type { FileMap } from '~/lib/stores/files';
import { shouldIncludeFile } from '~/utils/fileUtils';
import { getNutLoginKey, submitFeedback } from '~/lib/replay/Problems';
import { shouldUseSimulation } from '~/lib/hooks/useSimulation';
import { ChatMessageTelemetry, pingTelemetry } from '~/lib/hooks/pingTelemetry';
import type { RejectChangeData } from './ApproveChange';
import { assert, generateRandomId } from '~/lib/replay/ReplayProtocolClient';
const toastAnimation = cssTransition({
enter: 'animated fadeInRight',
@@ -41,17 +40,6 @@ const toastAnimation = cssTransition({
const logger = createScopedLogger('Chat');
// Debounce things after file writes to avoid creating a bunch of chats.
let gResetChatFileWrittenTimeout: NodeJS.Timeout | undefined;
export function resetChatFileWritten() {
  // Restart the debounce window on every write so rapid saves coalesce.
  clearTimeout(gResetChatFileWrittenTimeout);
  gResetChatFileWrittenTimeout = setTimeout(async () => {
    // Quiet for 500ms: snapshot the workbench as a zip and push it to the simulation.
    const { contentBase64 } = await workbenchStore.generateZipBase64();
    await simulationRepositoryUpdated(contentBase64);
  }, 500);
}
let gLastProjectContents: string | undefined;
export function getLastProjectContents() {
@@ -166,17 +154,33 @@ interface ChatProps {
let gNumAborts = 0;
function filterFiles(files: FileMap): FileMap {
const rv: FileMap = {};
for (const [path, file] of Object.entries(files)) {
if (shouldIncludeFile(path)) {
rv[path] = file;
}
let gActiveChatMessageTelemetry: ChatMessageTelemetry | undefined;
// When files are modified during a chat message we wait until the message finishes
// before updating the simulation.
let gUpdateSimulationAfterChatMessage = false;
async function clearActiveChat() {
gActiveChatMessageTelemetry = undefined;
if (gUpdateSimulationAfterChatMessage) {
const { contentBase64 } = await workbenchStore.generateZipBase64();
await simulationRepositoryUpdated(contentBase64);
gUpdateSimulationAfterChatMessage = false;
}
return rv;
}
let gActiveChatMessageTelemetry: ChatMessageTelemetry | undefined;
// React to a repository file write. While a chat message is in flight we only
// flag that a simulation sync is needed (it happens when the chat finishes);
// otherwise the updated repository contents are pushed immediately.
export async function onRepositoryFileWritten() {
  if (gActiveChatMessageTelemetry) {
    gUpdateSimulationAfterChatMessage = true;
    return;
  }
  const { contentBase64 } = await workbenchStore.generateZipBase64();
  await simulationRepositoryUpdated(contentBase64);
}
// Compose a stable message id from a kind prefix and the chat id.
function buildMessageId(prefix: string, chatId: string) {
  return [prefix, chatId].join("-");
}
export const ChatImpl = memo(
({ description, initialMessages, storeMessageHistory, importChat, exportChat }: ChatProps) => {
@@ -187,57 +191,32 @@ export const ChatImpl = memo(
const [uploadedFiles, setUploadedFiles] = useState<File[]>([]); // Move here
const [imageDataList, setImageDataList] = useState<string[]>([]); // Move here
const [searchParams, setSearchParams] = useSearchParams();
const [simulationLoading, setSimulationLoading] = useState(false);
const files = useStore(workbenchStore.files);
const { promptId } = useSettings();
const [approveChangesMessageId, setApproveChangesMessageId] = useState<string | undefined>(undefined);
// Input currently in the textarea.
const [input, setInput] = useState('');
// This is set when the user has triggered a chat message and the response hasn't finished
// being generated.
const [activeChatId, setActiveChatId] = useState<string | undefined>(undefined);
const isLoading = activeChatId !== undefined;
const [messages, setMessages] = useState<Message[]>(initialMessages);
const { showChat } = useStore(chatStore);
const [animationScope, animate] = useAnimate();
const { messages, isLoading, input, handleInputChange, setInput, stop, append, setMessages } = useChat({
api: '/api/chat',
body: {
files: filterFiles(files),
promptId,
},
sendExtraMessageFields: true,
onError: (error) => {
logger.error('Request failed\n\n', error);
toast.error(
'There was an error processing your request: ' + (error.message ? error.message : 'No details were returned'),
);
},
initialMessages,
initialInput: Cookies.get(PROMPT_COOKIE_KEY) || '',
});
// Once we are no longer loading the message is complete.
if (gActiveChatMessageTelemetry && !isLoading && !simulationLoading) {
gActiveChatMessageTelemetry.finish();
gActiveChatMessageTelemetry = undefined;
}
useEffect(() => {
const prompt = searchParams.get('prompt');
if (prompt) {
setSearchParams({});
runAnimation();
append({
role: 'user',
content: [
{
type: 'text',
text: prompt,
},
] as any, // Type assertion to bypass compiler check
});
sendMessage(prompt);
}
}, [searchParams]);
const { enhancingPrompt, promptEnhanced, enhancePrompt, resetEnhancer } = usePromptEnhancer();
const { parsedMessages, setParsedMessages, parseMessages } = useMessageParser();
const TEXTAREA_MAX_HEIGHT = chatStarted ? 400 : 200;
@@ -256,24 +235,16 @@ export const ChatImpl = memo(
});
}, [messages, isLoading, parseMessages]);
const scrollTextArea = () => {
const textarea = textareaRef.current;
if (textarea) {
textarea.scrollTop = textarea.scrollHeight;
}
};
const abort = () => {
stop();
gNumAborts++;
chatStore.setKey('aborted', true);
workbenchStore.abortAllActions();
setSimulationLoading(false);
setActiveChatId(undefined);
if (gActiveChatMessageTelemetry) {
gActiveChatMessageTelemetry.abort("StopButtonClicked");
gActiveChatMessageTelemetry = undefined;
clearActiveChat();
}
};
@@ -305,7 +276,7 @@ export const ChatImpl = memo(
setChatStarted(true);
};
const createRecording = async () => {
const createRecording = async (chatId: string) => {
let recordingId, message;
try {
recordingId = await getSimulationRecording();
@@ -316,7 +287,7 @@ export const ChatImpl = memo(
}
const recordingMessage: Message = {
id: `create-recording-${messages.length}`,
id: buildMessageId("create-recording", chatId),
role: 'assistant',
content: message,
};
@@ -324,7 +295,7 @@ export const ChatImpl = memo(
return { recordingId, recordingMessage };
};
const getEnhancedPrompt = async (userMessage: string) => {
const getEnhancedPrompt = async (chatId: string, userMessage: string) => {
let enhancedPrompt, message, hadError = false;
try {
const mouseData = getCurrentMouseData();
@@ -337,7 +308,7 @@ export const ChatImpl = memo(
}
const enhancedPromptMessage: Message = {
id: `enhanced-prompt-${Math.random()}`,
id: buildMessageId("enhanced-prompt", chatId),
role: 'assistant',
content: message,
};
@@ -365,14 +336,37 @@ export const ChatImpl = memo(
if (numFreeUses >= MaxFreeUses) {
toast.error('All free uses consumed. Please set a login key or Anthropic API key in the "User Info" settings.');
gActiveChatMessageTelemetry.abort("NoFreeUses");
gActiveChatMessageTelemetry = undefined;
clearActiveChat();
return;
}
Cookies.set(anthropicNumFreeUsesCookieName, (numFreeUses + 1).toString());
}
setSimulationLoading(true);
const chatId = generateRandomId();
setActiveChatId(chatId);
const userMessage: Message = {
id: buildMessageId("user", chatId),
role: 'user',
content: [
{
type: 'text',
text: _input,
},
...imageDataList.map((imageData) => ({
type: 'image',
image: imageData,
})),
] as any, // Type assertion to bypass compiler check
};
let newMessages = [...messages, userMessage];
setMessages(newMessages);
// Add file cleanup here
setUploadedFiles([]);
setImageDataList([]);
/**
* @note (delm) Usually saving files shouldn't take long but it may take longer if there
@@ -383,9 +377,12 @@ export const ChatImpl = memo(
*/
await workbenchStore.saveAllFiles();
let simulationEnhancedPrompt: string | undefined;
const simulation = chatStarted && await shouldUseSimulation(messages, _input);
let simulation = false;
try {
simulation = chatStarted && await shouldUseSimulation(_input);
} catch (e) {
console.error("Error checking simulation", e);
}
if (numAbortsAtStart != gNumAborts) {
return;
@@ -401,8 +398,8 @@ export const ChatImpl = memo(
try {
await flushSimulationData();
const createRecordingPromise = createRecording();
const enhancedPromptPromise = getEnhancedPrompt(_input);
const createRecordingPromise = createRecording(chatId);
const enhancedPromptPromise = getEnhancedPrompt(chatId, _input);
const { recordingId, recordingMessage } = await createRecordingPromise;
@@ -411,7 +408,8 @@ export const ChatImpl = memo(
}
console.log("RecordingMessage", recordingMessage);
setMessages([...messages, recordingMessage]);
newMessages = [...newMessages, recordingMessage];
setMessages(newMessages);
if (recordingId) {
const info = await enhancedPromptPromise;
@@ -420,10 +418,9 @@ export const ChatImpl = memo(
return;
}
simulationEnhancedPrompt = info.enhancedPrompt;
console.log("EnhancedPromptMessage", info.enhancedPromptMessage);
setMessages([...messages, info.enhancedPromptMessage]);
newMessages = [...newMessages, info.enhancedPromptMessage];
setMessages(newMessages);
simulationStatus = info.hadError ? "PromptError" : "Success";
} else {
@@ -442,21 +439,47 @@ export const ChatImpl = memo(
runAnimation();
setSimulationLoading(false);
gActiveChatMessageTelemetry.sendPrompt(simulationStatus);
append({
role: 'user',
content: [
{
type: 'text',
text: _input,
},
...imageDataList.map((imageData) => ({
type: 'image',
image: imageData,
})),
] as any, // Type assertion to bypass compiler check
}, { body: { simulationEnhancedPrompt, anthropicApiKey, loginKey } });
const responseMessageId = buildMessageId("response", chatId);
let responseMessageContent = "";
let hasResponseMessage = false;
const addResponseContent = (content: string) => {
responseMessageContent += content;
if (gNumAborts != numAbortsAtStart) {
return;
}
newMessages = [...newMessages];
if (hasResponseMessage) {
newMessages.pop();
}
newMessages.push({
id: responseMessageId,
role: 'assistant',
content: responseMessageContent,
});
setMessages(newMessages);
hasResponseMessage = true;
}
try {
await sendDeveloperChatMessage(newMessages, files, addResponseContent);
} catch (e) {
console.error("Error sending message", e);
addResponseContent("Error sending message.");
}
if (gNumAborts != numAbortsAtStart) {
return;
}
gActiveChatMessageTelemetry.finish();
clearActiveChat();
setActiveChatId(undefined);
if (fileModifications !== undefined) {
/**
@@ -469,25 +492,13 @@ export const ChatImpl = memo(
setInput('');
Cookies.remove(PROMPT_COOKIE_KEY);
// Add file cleanup here
setUploadedFiles([]);
setImageDataList([]);
resetEnhancer();
textareaRef.current?.blur();
// The project contents are associated with the last message present when
// the user message is added.
const lastMessage = messages[messages.length - 1];
if (lastMessage) {
const { contentBase64 } = await workbenchStore.generateZipBase64();
saveProjectContents(lastMessage.id, { content: contentBase64 });
gLastProjectContents = contentBase64;
setApproveChangesMessageId(lastMessage.id);
}
gActiveChatMessageTelemetry.sendPrompt(simulationStatus);
// The project contents are associated with the response message.
const { contentBase64 } = await workbenchStore.generateZipBase64();
saveProjectContents(responseMessageId, { content: contentBase64 });
gLastProjectContents = contentBase64;
setApproveChangesMessageId(responseMessageId);
};
const onRewind = async (messageId: string, contents: string) => {
@@ -588,7 +599,7 @@ export const ChatImpl = memo(
* @param event - The change event from the textarea.
*/
const onTextareaChange = (event: React.ChangeEvent<HTMLTextAreaElement>) => {
handleInputChange(event);
setInput(event.target.value);
};
/**
@@ -625,9 +636,7 @@ export const ChatImpl = memo(
input={input}
showChat={showChat}
chatStarted={chatStarted}
isStreaming={isLoading || simulationLoading}
enhancingPrompt={enhancingPrompt}
promptEnhanced={promptEnhanced}
isStreaming={isLoading}
sendMessage={sendMessage}
messageRef={messageRef}
scrollRef={scrollRef}
@@ -640,15 +649,6 @@ export const ChatImpl = memo(
importChat={importChat}
exportChat={exportChat}
messages={chatMessages}
enhancePrompt={() => {
enhancePrompt(
input,
(input) => {
setInput(input);
scrollTextArea();
},
);
}}
uploadedFiles={uploadedFiles}
setUploadedFiles={setUploadedFiles}
imageDataList={imageDataList}

View File

@@ -1,6 +1,5 @@
import React from 'react';
import { Switch } from '~/components/ui/Switch';
import { PromptLibrary } from '~/lib/common/prompt-library';
import { useSettings } from '~/lib/hooks/useSettings';
export default function FeaturesTab() {
@@ -82,25 +81,6 @@ export default function FeaturesTab() {
Enable experimental providers such as Ollama, LMStudio, and OpenAILike.
</p>
</div>
<div className="flex items-start justify-between pt-4 mb-2 gap-2">
<div className="flex-1 max-w-[200px]">
<span className="text-bolt-elements-textPrimary">Prompt Library</span>
<p className="text-xs text-bolt-elements-textTertiary mb-4">
Choose a prompt from the library to use as the system prompt.
</p>
</div>
<select
value={promptId}
onChange={(e) => setPromptId(e.target.value)}
className="flex-1 p-2 ml-auto rounded-lg border border-bolt-elements-borderColor bg-bolt-elements-prompt-background text-bolt-elements-textPrimary focus:outline-none focus:ring-2 focus:ring-bolt-elements-focus transition-all text-sm min-w-[100px]"
>
{PromptLibrary.getList().map((x) => (
<option key={x.id} value={x.id}>
{x.label}
</option>
))}
</select>
</div>
</div>
</div>
);

View File

@@ -1,494 +0,0 @@
import type { CoreMessage } from 'ai';
import Anthropic from '@anthropic-ai/sdk';
import { ChatStreamController } from '~/utils/chatStreamController';
import type { ContentBlockParam, MessageParam } from '@anthropic-ai/sdk/resources/messages/messages.mjs';
import type { FileMap } from './stream-text';
import { StreamingMessageParser } from '~/lib/runtime/message-parser';
import { extractRelativePath } from '~/utils/diff';
import { wrapWithSpan, getCurrentSpan } from '~/lib/.server/otel-wrapper';
import { assert } from '~/lib/replay/ReplayProtocolClient';
// Anthropic model used for every chat call in this module.
const Model = 'claude-3-7-sonnet-20250219';

// Maximum output tokens requested per messages.create call.
const MaxMessageTokens = 8192;
/**
 * Convert message content from the `ai` package's loose shape into Anthropic
 * ContentBlockParam blocks.
 *
 * - strings become a single text block
 * - arrays are converted element-wise (flattened)
 * - `{ type: "image", image: <data URL> }` becomes a base64 image block
 * - anything unrecognized (including malformed data URLs) is logged and
 *   dropped (empty array) rather than failing the whole chat.
 *
 * Fix: use strict equality throughout (the image branch used loose `==`,
 * inconsistent with the text branch).
 */
function convertContentToAnthropic(content: any): ContentBlockParam[] {
  if (typeof content === "string") {
    return [{ type: "text", text: content }];
  }
  if (Array.isArray(content)) {
    return content.flatMap(convertContentToAnthropic);
  }
  if (content.type === "text" && typeof content.text === "string") {
    return [{ type: "text", text: content.text }];
  }
  if (content.type === "image" && typeof content.image === "string") {
    // Parse data URL to extract content type and base64 data
    const matches = content.image.match(/^data:([^;]+);base64,(.+)$/);
    if (!matches) {
      console.log("Invalid image data URL format");
      return [];
    }
    return [{
      type: "image",
      source: {
        type: "base64",
        data: matches[2],
        media_type: matches[1],
      },
    }];
  }
  console.log("AnthropicUnknownContent", JSON.stringify(content, null, 2));
  return [];
}
// Per-request state threaded through the Anthropic helpers.
export interface ChatState {
  // API key used for every Anthropic call in this chat.
  apiKey: string;
  // Whether the API key was supplied by the user (reported in telemetry as
  // llm.chat.is_user_api_key).
  isUser: boolean;
  userLoginKey?: string;

  // Info about how the chat was processed which will be conveyed back to the client.
  infos: string[];
}

// Record of a single Anthropic call: its inputs, the concatenated text
// response, and the token usage reported by the API.
export interface AnthropicCall {
  systemPrompt: string;
  messages: MessageParam[];
  responseText: string;
  completionTokens: number;
  promptTokens: number;
}
// Recognize Anthropic's "prompt is too long: N tokens > M maximum" error.
// Returns { tokens, maximum } when the error matches, undefined otherwise
// (including when the error object doesn't have the expected shape).
function maybeParseAnthropicErrorPromptTooLong(e: any) {
  try {
    const { type, message } = e.error.error;
    if (type !== "invalid_request_error") {
      return undefined;
    }
    const match = /prompt is too long: (\d+) tokens > (\d+) maximum/.exec(message);
    if (!match) {
      return undefined;
    }
    return { tokens: +match[1], maximum: +match[2] };
  } catch (err) {
    console.log("AnthropicParseError", err);
  }
  return undefined;
}
async function countTokens(state: ChatState, messages: MessageParam[], systemPrompt: string): Promise<number> {
const anthropic = new Anthropic({ apiKey: state.apiKey });
const response = await anthropic.messages.countTokens({
model: Model,
messages,
system: systemPrompt,
});
return response.input_tokens;
}
// Keep roughly the trailing `factor` fraction of the text, discarding the
// start. factor must be in (0, 1].
function compressMessageText(text: string, factor: number): string {
  assert(factor > 0 && factor <= 1, `Invalid compression factor: ${factor}`);
  const keep = Math.round(text.length * factor);
  return text.slice(text.length - keep);
}
/**
 * Return a copy of `msg` with its text content compressed by `factor`.
 *
 * Only assistant messages are compressed — user messages are returned
 * unchanged (same reference). Both string content and arrays of content
 * blocks are handled; non-text blocks in an array pass through untouched.
 *
 * Fix: use strict inequality (`!==`) for the role check, consistent with the
 * strict comparisons used elsewhere in this module.
 */
function compressMessage(msg: MessageParam, factor: number): MessageParam {
  // Only compress assistant messages.
  if (msg.role !== "assistant") {
    return msg;
  }
  const newMessage = { ...msg };
  if (typeof newMessage.content === "string") {
    newMessage.content = compressMessageText(newMessage.content, factor);
  } else if (Array.isArray(newMessage.content)) {
    newMessage.content = newMessage.content.map((block) => {
      const newBlock = { ...block };
      if (newBlock.type === "text") {
        newBlock.text = compressMessageText(newBlock.text, factor);
      }
      return newBlock;
    });
  }
  return newMessage;
}
// Compress every message in the list by the same factor, preserving order.
function compressMessages(messages: MessageParam[], factor: number): MessageParam[] {
  return messages.map((msg) => compressMessage(msg, factor));
}
// Iteratively compress the messages until the API reports they fit under
// `maximum` input tokens. Gives up (throws) after a bounded number of rounds.
async function reduceMessageSize(state: ChatState, messages: MessageParam[], systemPrompt: string, maximum: number): Promise<MessageParam[]> {
  const MaxIterations = 5;
  let current = messages;
  for (let iteration = 0; iteration < MaxIterations; iteration++) {
    const tokens = await countTokens(state, current, systemPrompt);
    const note = `AnthropicReduceMessageSize ${JSON.stringify({ iteration, tokens, maximum })}`;
    console.log(note);
    state.infos.push(note);
    if (tokens <= maximum) {
      return current;
    }
    // Target slightly below the limit so we converge in few iterations.
    current = compressMessages(current, (maximum / tokens) * 0.9);
  }
  throw new Error("Message compression failed");
}
// Make one raw messages.create call to Anthropic.
// On failure: record diagnostics in state.infos, and if the API reported
// "prompt is too long", compress the messages below the reported maximum and
// retry once. Any other error is rethrown to the caller.
async function callAnthropicRaw(state: ChatState, systemPrompt: string, messages: MessageParam[]): Promise<Anthropic.Messages.Message> {
  const anthropic = new Anthropic({ apiKey: state.apiKey });
  try {
    return await anthropic.messages.create({
      model: Model,
      messages,
      max_tokens: MaxMessageTokens,
      system: systemPrompt,
    });
  } catch (e: any) {
    console.error("AnthropicError", e);
    state.infos.push(`AnthropicError: ${e}`);
    try {
      // e.error may be absent or unserializable, so the diagnostics are
      // themselves guarded.
      console.log(`AnthropicErrorData ${JSON.stringify(e.error)}`);
      state.infos.push(`AnthropicErrorData ${JSON.stringify(e.error)}`);
    } catch (e) {
      console.log(`AnthropicErrorDataException ${e}`);
      state.infos.push(`AnthropicErrorDataException ${e}`);
    }
    state.infos.push(`AnthropicErrorMessages ${JSON.stringify({ systemPrompt, messages })}`);
    const info = maybeParseAnthropicErrorPromptTooLong(e);
    if (info) {
      const { maximum } = info;
      // Compress to fit and retry exactly once with the reduced messages.
      const newMessages = await reduceMessageSize(state, messages, systemPrompt, maximum);
      state.infos.push(`AnthropicCompressedMessages ${JSON.stringify({ systemPrompt, newMessages })}`);
      return await anthropic.messages.create({
        model: Model,
        messages: newMessages,
        max_tokens: MaxMessageTokens,
        system: systemPrompt,
      });
    }
    throw e;
  }
}
// Make one chat call to Anthropic, wrapped in an OpenTelemetry span that
// records the call reason, message count, and token usage. Returns the call
// record (inputs, concatenated text response, token counts).
export const callAnthropic = wrapWithSpan(
  {
    name: "llm-call",
    attrs: {
      "llm.provider": "anthropic",
      "llm.model": Model,
    },
  },
  // eslint-disable-next-line prefer-arrow-callback
  async function callAnthropic(state: ChatState, reason: string, systemPrompt: string, messages: MessageParam[]): Promise<AnthropicCall> {
    const span = getCurrentSpan();
    span?.setAttributes({
      "llm.chat.calls": 1, // so we can SUM(llm.chat.calls) without doing a COUNT + filter
      "llm.chat.num_messages": messages.length,
      "llm.chat.reason": reason,
      "llm.chat.is_user_api_key": state.isUser,
      "llm.chat.user_login_key": state.userLoginKey,
    });

    console.log("AnthropicMessageSend");
    const response = await callAnthropicRaw(state, systemPrompt, messages);

    // Concatenate the text blocks of the response; non-text blocks are logged
    // and otherwise ignored.
    let responseText = "";
    for (const content of response.content) {
      if (content.type === "text") {
        responseText += content.text;
      } else {
        console.log("AnthropicUnknownResponse", JSON.stringify(content, null, 2));
      }
    }

    const completionTokens = response.usage.output_tokens;
    const promptTokens = response.usage.input_tokens;

    span?.setAttributes({
      "llm.chat.prompt_tokens": promptTokens,
      "llm.chat.completion_tokens": completionTokens,
      // to save us needing to worry about a derived column
      "llm.chat.total_tokens": completionTokens + promptTokens,
    });

    console.log("AnthropicMessageResponse");

    return {
      systemPrompt,
      messages,
      responseText,
      completionTokens,
      promptTokens,
    };
  },
);
// Find the text contents of `path` in the file map, comparing on
// repository-relative paths. Returns "" for folders, binary files, or when
// no entry matches.
function getFileContents(files: FileMap, path: string): string {
  for (const [filePath, file] of Object.entries(files)) {
    const matchesPath = extractRelativePath(filePath) === path;
    if (matchesPath && file?.type === "file" && !file.isBinary) {
      return file.content;
    }
  }
  return "";
}
// Heuristic: if the model's new content is shorter than what already exists,
// it likely elided sections and the file needs restoring.
function shouldRestorePartialFile(existingContent: string, newContent: string): boolean {
  const shrank = newContent.length < existingContent.length;
  return shrank;
}
// Ask the model to re-insert code that a previous response elided with
// "rest of the code remains the same" style comments.
//
// existingContent: the file as it exists on disk.
// newContent: the (possibly partial) file contents from the response.
// responseDescription: the response's prose description, for context.
//
// Returns the restore call (for usage accounting) plus the restored content.
// On any problem with the model's answer, the unmodified newContent is
// returned instead.
async function restorePartialFile(
  state: ChatState,
  existingContent: string,
  newContent: string,
  responseDescription: string
) {
  const systemPrompt = `
You are a helpful assistant that restores code skipped over by partial updates made by another assistant.
You will be given the existing content for a file and the new content that may contain partial updates.
Your task is to return complete restored content which both reflects the changes made in the new content
and includes any code that was removed from the original file.
Describe any places in the new content where code may have been removed.
ULTRA IMPORTANT: Only remove content that has been skipped due to comments similar to the following:
// rest of the code remains the same.
// this function is unchanged.
ULTRA IMPORTANT: Do not restore content that was intentionally removed by the other assistant.
ULTRA IMPORTANT: The restored content should be returned in the following format:
<restoredContent>
Restored content goes here
</restoredContent>
`;
  const userPrompt = `
The existing content for the file is:
<existingContent>
${existingContent}
</existingContent>
The new content that may contain partial updates is:
<newContent>
${newContent}
</newContent>
The other assistant's description of its changes is:
<description>
${responseDescription}
</description>
`;
  const messages: MessageParam[] = [
    {
      role: "user",
      content: userPrompt,
    },
  ];
  const restoreCall = await callAnthropic(state, "RestorePartialFile", systemPrompt, messages);

  // Extract the content between the <restoredContent> tags; bail out to the
  // unmodified newContent if the tags are missing.
  const OpenTag = "<restoredContent>";
  const CloseTag = "</restoredContent>";
  const openTag = restoreCall.responseText.indexOf(OpenTag);
  const closeTag = restoreCall.responseText.indexOf(CloseTag);
  if (openTag === -1 || closeTag === -1) {
    state.infos.push(`Error: Invalid restored content: ${restoreCall.responseText}`);
    return { restoreCall, restoredContent: newContent };
  }
  const restoredContent = restoreCall.responseText.substring(openTag + OpenTag.length, closeTag);

  // Sometimes the model ignores its instructions and doesn't return the content if it hasn't
  // made any modifications. In this case we use the unmodified new content.
  if (restoredContent.length < existingContent.length && restoredContent.length < newContent.length) {
    state.infos.push(`Error: Restored content too short: ${restoreCall.responseText}`);
    return { restoreCall, restoredContent: newContent };
  }
  return { restoreCall, restoredContent };
}
// Return the English description in a model response, stripping out every
// <boltArtifact ...>...</boltArtifact> section. An unterminated artifact tag
// drops everything from the opening tag onward.
function getMessageDescription(responseText: string): string {
  const OpenTag = "<boltArtifact";
  const CloseTag = "</boltArtifact>";
  let text = responseText;
  for (let openTag = text.indexOf(OpenTag); openTag !== -1; openTag = text.indexOf(OpenTag)) {
    const before = text.substring(0, openTag);
    const closeTag = text.indexOf(CloseTag, openTag + OpenTag.length);
    text = closeTag === -1 ? before : before + text.substring(closeTag + CloseTag.length);
  }
  return text;
}
// Query the npm registry for a package's latest published version.
// Returns undefined (and records an info) when the lookup fails or the
// registry response has no string version.
async function getLatestPackageVersion(state: ChatState, packageName: string) {
  const url = `https://registry.npmjs.org/${packageName}/latest`;
  try {
    const data = (await (await fetch(url)).json()) as any;
    if (typeof data.version === "string") {
      return data.version;
    }
  } catch {
    state.infos.push(`Error getting latest package version: ${packageName}`);
  }
  return undefined;
}
// Packages we deliberately keep pinned during upgrades. React is excluded
// because our react 19 support isn't complete yet.
function ignorePackageUpgrade(packageName: string) {
  const pinnedPrefixes = ["react"];
  return pinnedPrefixes.some((prefix) => packageName.startsWith(prefix));
}
// Upgrade dependencies in package.json to the latest version, instead of the random
// and sometimes ancient versions that the AI picks.
//
// Returns the pretty-printed updated JSON; on parse errors the original
// content is returned unchanged and the problem is recorded in state.infos.
//
// Fix: a package.json without a "dependencies" section is perfectly valid,
// but the old code hit Object.keys(undefined), which threw and pushed a
// misleading "Error upgrading package.json" info. Guard for the missing
// section instead.
async function upgradePackageJSON(state: ChatState, content: string) {
  try {
    const packageJSON = JSON.parse(content);
    const dependencies = packageJSON.dependencies ?? {};
    for (const key of Object.keys(dependencies)) {
      if (!ignorePackageUpgrade(key)) {
        // Lookups are sequential to avoid hammering the registry.
        const version = await getLatestPackageVersion(state, key);
        if (version) {
          dependencies[key] = version;
        }
      }
    }
    return JSON.stringify(packageJSON, null, 2);
  } catch (e) {
    state.infos.push(`Error upgrading package.json: ${e}`);
    return content;
  }
}
/**
 * Replace the first occurrence of oldContent with newContent inside
 * responseText.
 *
 * If oldContent is not found verbatim, retry with surrounding whitespace
 * trimmed (the parsed file content can pick up a trailing newline that wasn't
 * in the raw response). If it still isn't found, record the problem in
 * state.infos and return responseText unmodified.
 *
 * Fix: use strict equality for the second -1 check, consistent with the
 * first check in this function.
 */
function replaceFileContents(state: ChatState, responseText: string, oldContent: string, newContent: string) {
  let contentIndex = responseText.indexOf(oldContent);
  if (contentIndex === -1) {
    // The old content may have a trailing newline which wasn't originally present in the response.
    oldContent = oldContent.trim();
    contentIndex = responseText.indexOf(oldContent);
    if (contentIndex === -1) {
      state.infos.push(`Error: Old content not found in response: ${JSON.stringify({ responseText, oldContent })}`);
      return responseText;
    }
  }
  return responseText.substring(0, contentIndex) +
    newContent +
    responseText.substring(contentIndex + oldContent.length);
}
// A single file emitted by the model's bolt actions.
interface FileContents {
  filePath: string;
  content: string;
}

// Post-process a model response: restore sections elided from partially
// updated files, and pin package.json dependencies to their latest versions.
// Returns the fixed-up response text plus any extra Anthropic calls made
// while restoring files (for usage accounting).
async function fixupResponseFiles(state: ChatState, files: FileMap, responseText: string) {
  // Collect every file-writing action embedded in the response.
  const fileContents: FileContents[] = [];
  const messageParser = new StreamingMessageParser({
    callbacks: {
      onActionClose: (data) => {
        if (data.action.type === "file") {
          const { filePath, content } = data.action;
          fileContents.push({
            filePath,
            content,
          });
        }
      },
    }
  });
  messageParser.parse("restore-partial-files-message-id", responseText);
  const responseDescription = getMessageDescription(responseText);
  const restoreCalls: AnthropicCall[] = [];
  for (const { filePath, content: newContent } of fileContents) {
    const existingContent = getFileContents(files, filePath);
    if (shouldRestorePartialFile(existingContent, newContent)) {
      const { restoreCall, restoredContent } = await restorePartialFile(
        state,
        existingContent,
        newContent,
        responseDescription
      );
      restoreCalls.push(restoreCall);
      // Splice the restored file contents back into the response text.
      responseText = replaceFileContents(state, responseText, newContent, restoredContent);
    }
    if (filePath.includes("package.json")) {
      const newPackageJSON = await upgradePackageJSON(state, newContent);
      responseText = replaceFileContents(state, responseText, newContent, newPackageJSON);
    }
  }
  return { responseText, restoreCalls };
}
// Everything the client needs to display/debug a chat: the main call, any
// restore calls, and accumulated processing infos.
export type ChatAnthropicInfo = {
  mainCall: AnthropicCall;
  restoreCalls: AnthropicCall[];
  infos: string[];
}

// Run one chat turn against Anthropic: convert the messages, make the main
// call, fix up any partial file updates in the response, then stream the text
// and aggregated token usage back through the chat controller.
export async function chatAnthropic(state: ChatState, chatController: ChatStreamController, files: FileMap, systemPrompt: string, messages: CoreMessage[]) {
  const messageParams: MessageParam[] = messages.map((message) => ({
    role: message.role === "user" ? "user" : "assistant",
    content: convertContentToAnthropic(message.content),
  }));

  const mainCall = await callAnthropic(state, "SendChatMessage", systemPrompt, messageParams);
  const { responseText, restoreCalls } = await fixupResponseFiles(state, files, mainCall.responseText);

  chatController.writeText(responseText);

  const chatInfo: ChatAnthropicInfo = { mainCall, restoreCalls, infos: state.infos };

  // Usage is the sum across the main call and every restore call.
  const allCalls = [mainCall, ...restoreCalls];
  const completionTokens = allCalls.reduce((sum, call) => sum + call.completionTokens, 0);
  const promptTokens = allCalls.reduce((sum, call) => sum + call.promptTokens, 0);

  chatController.writeUsage({ chatInfo, completionTokens, promptTokens });
}

View File

@@ -1,233 +0,0 @@
import { convertToCoreMessages, streamText as _streamText } from 'ai';
import { MAX_TOKENS } from './constants';
import { getSystemPrompt } from '~/lib/common/prompts/prompts';
import {
DEFAULT_MODEL,
DEFAULT_PROVIDER,
getModelList,
MODEL_REGEX,
MODIFICATIONS_TAG_NAME,
PROVIDER_LIST,
PROVIDER_REGEX,
WORK_DIR,
} from '~/utils/constants';
import ignore from 'ignore';
import type { IProviderSetting } from '~/types/model';
import { PromptLibrary } from '~/lib/common/prompt-library';
import { allowedHTMLElements } from '~/utils/markdown';
// Result of a single tool invocation attached to a message.
interface ToolResult<Name extends string, Args, Result> {
  toolCallId: string;
  toolName: Name;
  args: Args;
  result: Result;
}

// A chat message as exchanged with the UI. `model` optionally records which
// model was selected for the message.
interface Message {
  role: 'user' | 'assistant';
  content: string;
  toolInvocations?: ToolResult<string, unknown, unknown>[];
  model?: string;
}

export type Messages = Message[];

// Options accepted by the `ai` package's streamText, minus `model`
// (which this module resolves itself).
export type StreamingOptions = Omit<Parameters<typeof _streamText>[0], 'model'>;

// In-memory representation of an entry in the webcontainer file tree.
export interface File {
  type: 'file';
  content: string;
  isBinary: boolean;
}

export interface Folder {
  type: 'folder';
}

type Dirent = File | Folder;

// Map from path to entry. NOTE(review): `undefined` values presumably
// represent missing/removed paths — confirm against the files store.
export type FileMap = Record<string, Dirent | undefined>;
/**
 * Collapse the body of every `<boltAction type="file">` element down to a
 * placeholder, keeping the surrounding tags intact. Used to shrink large
 * inline file payloads.
 */
export function simplifyBoltActions(input: string): string {
  // Capture: opening tag (must carry type="file"), lazy body, closing tag.
  const fileActionPattern = /(<boltAction[^>]*type="file"[^>]*>)([\s\S]*?)(<\/boltAction>)/g;
  return input.replace(
    fileActionPattern,
    (_match, openTag: string, _body: string, closeTag: string) => `${openTag}\n ...\n ${closeTag}`,
  );
}
// Common patterns to ignore, similar to .gitignore: dependency trees, build
// output, editor state, logs, and lockfiles.
const IGNORE_PATTERNS = [
  'node_modules/**',
  '.git/**',
  'dist/**',
  'build/**',
  '.next/**',
  'coverage/**',
  '.cache/**',
  '.vscode/**',
  '.idea/**',
  '**/*.log',
  '**/.DS_Store',
  '**/npm-debug.log*',
  '**/yarn-debug.log*',
  '**/yarn-error.log*',
  '**/*lock.json',
  '**/*lock.yml',
];

// Shared matcher used by createFilesContext to filter out ignored paths
// before building the code context sent to the model.
const ig = ignore().add(IGNORE_PATTERNS);
/**
 * Build a textual context block listing every non-ignored file in the
 * webcontainer, with 1-based line numbers prefixed to each source line.
 */
function createFilesContext(files: FileMap) {
  // Drop paths matching the shared ignore patterns (paths are rooted at
  // /home/project/ in the container).
  const visiblePaths = Object.keys(files).filter((path) => {
    const relPath = path.replace('/home/project/', '');
    return !ig.ignores(relPath);
  });

  const fileContexts = visiblePaths
    .filter((path) => files[path] && files[path].type === 'file')
    .map((path) => {
      const dirent = files[path];
      if (!dirent || dirent.type === 'folder') {
        return '';
      }
      const numberedLines = dirent.content
        .split('\n')
        .map((line, index) => `${index + 1}|${line}`)
        .join('\n');
      return `<file path="${path}">\n${numberedLines}\n</file>`;
    });

  return `Below are the code files present in the webcontainer:\ncode format:\n<line number>|<line content>\n <codebase>${fileContexts.join('\n\n')}\n\n</codebase>`;
}
// Pull the model/provider selection markers out of a user message and return
// the message content with those markers stripped.
// NOTE(review): `Message.content` is declared as `string`, yet this function
// also handles array content (multi-part messages with images); the declared
// type looks out of date — confirm against callers.
function extractPropertiesFromMessage(message: Message): { model: string; provider: string; content: string } {
  // For multi-part content, the markers live in the first text part.
  const textContent = Array.isArray(message.content)
    ? message.content.find((item) => item.type === 'text')?.text || ''
    : message.content;

  const modelMatch = textContent.match(MODEL_REGEX);
  const providerMatch = textContent.match(PROVIDER_REGEX);

  /*
   * Extract model
   * const modelMatch = message.content.match(MODEL_REGEX);
   */
  const model = modelMatch ? modelMatch[1] : DEFAULT_MODEL;

  /*
   * Extract provider
   * const providerMatch = message.content.match(PROVIDER_REGEX);
   */
  const provider = providerMatch ? providerMatch[1] : DEFAULT_PROVIDER.name;

  // Strip the markers from text parts; image parts and other content types
  // pass through untouched.
  const cleanedContent = Array.isArray(message.content)
    ? message.content.map((item) => {
        if (item.type === 'text') {
          return {
            type: 'text',
            text: item.text?.replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, ''),
          };
        }

        return item; // Preserve image_url and other types as is
      })
    : textContent.replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, '');

  return { model, provider, content: cleanedContent };
}
/**
 * Assemble the arguments for a streaming completion: resolve the model and
 * provider chosen via markers in the user messages, pick the system prompt
 * from the prompt library, and convert messages to the core format.
 */
export async function getStreamTextArguments(props: {
  messages: Messages;
  env: Env;
  apiKeys?: Record<string, string>;
  files?: FileMap;
  providerSettings?: Record<string, IProviderSetting>;
  promptId?: string;
}) {
  const { messages, env: serverEnv, apiKeys, files, providerSettings, promptId } = props;

  // console.log({serverEnv});

  let currentModel = DEFAULT_MODEL;
  let currentProvider = DEFAULT_PROVIDER.name;
  const MODEL_LIST = await getModelList({ apiKeys, providerSettings, serverEnv: serverEnv as any });
  const processedMessages = messages.map((message) => {
    if (message.role === 'user') {
      // The last user message carrying valid markers wins.
      const { model, provider, content } = extractPropertiesFromMessage(message);

      // Only accept models present in the resolved model list.
      if (MODEL_LIST.find((m) => m.name === model)) {
        currentModel = model;
      }

      currentProvider = provider;

      return { ...message, content };
    } else if (message.role == 'assistant') {
      const content = message.content;

      // content = simplifyBoltActions(content);
      return { ...message, content };
    }

    return message;
  });

  const modelDetails = MODEL_LIST.find((m) => m.name === currentModel);

  const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;

  let systemPrompt =
    PromptLibrary.getPropmtFromLibrary(promptId || 'default', {
      cwd: WORK_DIR,
      allowedHtmlElements: allowedHTMLElements,
      modificationTagName: MODIFICATIONS_TAG_NAME,
    }) ?? getSystemPrompt();

  let codeContext = '';

  if (files) {
    codeContext = createFilesContext(files);

    // NOTE(review): the computed context is immediately discarded here, so the
    // system prompt never actually includes file contents. This looks like a
    // deliberate (temporary?) disable — confirm intent before removing.
    codeContext = '';
    systemPrompt = `${systemPrompt}\n\n ${codeContext}`;
  }

  const coreMessages = convertToCoreMessages(processedMessages as any);

  return {
    currentModel,
    currentProvider,
    system: systemPrompt,
    maxTokens: dynamicMaxTokens,
    messages: coreMessages,
  };
}
/**
 * Resolve the provider/model pair selected in the message history and start
 * a streaming completion with the assembled arguments.
 */
export async function streamText(props: {
  messages: Messages;
  env: Env;
  options?: StreamingOptions;
  apiKeys?: Record<string, string>;
  files?: FileMap;
  providerSettings?: Record<string, IProviderSetting>;
  promptId?: string;
}) {
  const args = await getStreamTextArguments(props);

  // Fall back to the default provider when the requested one is unknown.
  const provider = PROVIDER_LIST.find((candidate) => candidate.name === args.currentProvider) ?? DEFAULT_PROVIDER;

  const model = provider.getModelInstance({
    model: args.currentModel,
    serverEnv: props.env,
    apiKeys: props.apiKeys,
    providerSettings: props.providerSettings,
  });

  return _streamText({ ...args, ...props.options, model });
}

View File

@@ -1,49 +0,0 @@
import { getSystemPrompt } from './prompts/prompts';
import optimized from './prompts/optimized';
// Values interpolated into a system prompt template.
export interface PromptOptions {
  cwd: string; // working directory embedded in the prompt
  allowedHtmlElements: string[]; // HTML tags the model may emit in responses
  modificationTagName: string; // tag wrapping user-made file modifications
}
/**
 * Registry of the available system prompts, keyed by id.
 */
export class PromptLibrary {
  static library: Record<
    string,
    {
      label: string;
      description: string;
      get: (options: PromptOptions) => string;
    }
  > = {
    default: {
      label: 'Default Prompt',
      description: 'This is the battle tested default system Prompt',
      get: (options) => getSystemPrompt(options.cwd),
    },
    optimized: {
      label: 'Optimized Prompt (experimental)',
      description: 'an Experimental version of the prompt for lower token usage',
      get: (options) => optimized(options),
    },
  };

  /** List id/label/description for every registered prompt (for UI pickers). */
  static getList() {
    return Object.entries(this.library).map(([key, value]) => {
      const { label, description } = value;
      return {
        id: key,
        label,
        description,
      };
    });
  }

  /**
   * Render the prompt with the given id.
   * NOTE: the method name preserves an existing typo ("Propmt") because
   * callers depend on it.
   * @throws {Error} when `promptId` is not registered.
   */
  static getPropmtFromLibrary(promptId: string, options: PromptOptions) {
    const prompt = this.library[promptId];

    if (!prompt) {
      // Previously threw a bare string ('Prompt Now Found'); throw a real
      // Error naming the offending id instead.
      throw new Error(`Prompt not found: ${promptId}`);
    }

    return prompt.get(options);
  }
}

View File

@@ -1,199 +0,0 @@
import type { PromptOptions } from '~/lib/common/prompt-library';
// Token-optimized variant of the system prompt (experimental).
// NOTE(review): the prompt text below contains apparent typos ("steup",
// "scho" — presumably "setup"/"echo") and duplicated/non-sequential rule
// numbering (9, 11, 10, 11, ... 22-26). Left byte-identical here because it
// is runtime prompt content; confirm before editing.
export default (options: PromptOptions) => {
  const { cwd, allowedHtmlElements, modificationTagName } = options;
  return `
You are Bolt, an expert AI assistant and exceptional senior software developer with vast knowledge across multiple programming languages, frameworks, and best practices.
<system_constraints>
- Operating in WebContainer, an in-browser Node.js runtime
- Limited Python support: standard library only, no pip
- No C/C++ compiler, native binaries, or Git
- Prefer Node.js scripts over shell scripts
- Use Vite for web servers
- Databases: prefer libsql, sqlite, or non-native solutions
- When for react dont forget to write vite config and index.html to the project
Available shell commands: cat, cp, ls, mkdir, mv, rm, rmdir, touch, hostname, ps, pwd, uptime, env, node, python3, code, jq, curl, head, sort, tail, clear, which, export, chmod, scho, kill, ln, xxd, alias, getconf, loadenv, wasm, xdg-open, command, exit, source
</system_constraints>
<code_formatting_info>
Use 2 spaces for indentation
</code_formatting_info>
<message_formatting_info>
Available HTML elements: ${allowedHtmlElements.join(', ')}
</message_formatting_info>
<diff_spec>
File modifications in \`<${modificationTagName}>\` section:
- \`<diff path="/path/to/file">\`: GNU unified diff format
- \`<file path="/path/to/file">\`: Full new content
</diff_spec>
<chain_of_thought_instructions>
do not mention the phrase "chain of thought"
Before solutions, briefly outline implementation steps (2-4 lines max):
- List concrete steps
- Identify key components
- Note potential challenges
- Do not write the actual code just the plan and structure if needed
- Once completed planning start writing the artifacts
</chain_of_thought_instructions>
<artifact_info>
Create a single, comprehensive artifact for each project:
- Use \`<boltArtifact>\` tags with \`title\` and \`id\` attributes
- Use \`<boltAction>\` tags with \`type\` attribute:
- shell: Run commands
- file: Write/update files (use \`filePath\` attribute)
- start: Start dev server (only when necessary)
- Order actions logically
- Install dependencies first
- Provide full, updated content for all files
- Use coding best practices: modular, clean, readable code
</artifact_info>
# CRITICAL RULES - NEVER IGNORE
## File and Command Handling
1. ALWAYS use artifacts for file contents and commands - NO EXCEPTIONS
2. When writing a file, INCLUDE THE ENTIRE FILE CONTENT - NO PARTIAL UPDATES
3. For modifications, ONLY alter files that require changes - DO NOT touch unaffected files
## Response Format
4. Use markdown EXCLUSIVELY - HTML tags are ONLY allowed within artifacts
5. Be concise - Explain ONLY when explicitly requested
6. NEVER use the word "artifact" in responses
## Development Process
7. ALWAYS think and plan comprehensively before providing a solution
8. Current working directory: \`${cwd} \` - Use this for all file paths
9. Don't use cli scaffolding to steup the project, use cwd as Root of the project
11. For nodejs projects ALWAYS install dependencies after writing package.json file
## Coding Standards
10. ALWAYS create smaller, atomic components and modules
11. Modularity is PARAMOUNT - Break down functionality into logical, reusable parts
12. IMMEDIATELY refactor any file exceeding 250 lines
13. ALWAYS plan refactoring before implementation - Consider impacts on the entire system
## Artifact Usage
22. Use \`<boltArtifact>\` tags with \`title\` and \`id\` attributes for each project
23. Use \`<boltAction>\` tags with appropriate \`type\` attribute:
- \`shell\`: For running commands
- \`file\`: For writing/updating files (include \`filePath\` attribute)
- \`start\`: For starting dev servers (use only when necessary/ or new dependencies are installed)
24. Order actions logically - dependencies MUST be installed first
25. For Vite project must include vite config and index.html for entry point
26. Provide COMPLETE, up-to-date content for all files - NO placeholders or partial updates
CRITICAL: These rules are ABSOLUTE and MUST be followed WITHOUT EXCEPTION in EVERY response.
Examples:
<examples>
<example>
<user_query>Can you help me create a JavaScript function to calculate the factorial of a number?</user_query>
<assistant_response>
Certainly, I can help you create a JavaScript function to calculate the factorial of a number.
<boltArtifact id="factorial-function" title="JavaScript Factorial Function">
<boltAction type="file" filePath="index.js">
function factorial(n) {
 ...
}
...
</boltAction>
<boltAction type="shell">
node index.js
</boltAction>
</boltArtifact>
</assistant_response>
</example>
<example>
<user_query>Build a snake game</user_query>
<assistant_response>
Certainly! I'd be happy to help you build a snake game using JavaScript and HTML5 Canvas. This will be a basic implementation that you can later expand upon. Let's create the game step by step.
<boltArtifact id="snake-game" title="Snake Game in HTML and JavaScript">
<boltAction type="file" filePath="package.json">
{
 "name": "snake",
 "scripts": {
 "dev": "vite"
 }
 ...
}
</boltAction>
<boltAction type="shell">
npm install --save-dev vite
</boltAction>
<boltAction type="file" filePath="index.html">
...
</boltAction>
<boltAction type="start">
npm run dev
</boltAction>
</boltArtifact>
Now you can play the Snake game by opening the provided local server URL in your browser. Use the arrow keys to control the snake. Eat the red food to grow and increase your score. The game ends if you hit the wall or your own tail.
</assistant_response>
</example>
<example>
<user_query>Make a bouncing ball with real gravity using React</user_query>
<assistant_response>
Certainly! I'll create a bouncing ball with real gravity using React. We'll use the react-spring library for physics-based animations.
<boltArtifact id="bouncing-ball-react" title="Bouncing Ball with Gravity in React">
<boltAction type="file" filePath="package.json">
{
 "name": "bouncing-ball",
 "private": true,
 "version": "0.0.0",
 "type": "module",
 "scripts": {
 "dev": "vite",
 "build": "vite build",
 "preview": "vite preview"
 },
 "dependencies": {
 "react": "^18.2.0",
 "react-dom": "^18.2.0",
 "react-spring": "^9.7.1"
 },
 "devDependencies": {
 "@types/react": "^18.0.28",
 "@types/react-dom": "^18.0.11",
 "@vitejs/plugin-react": "^3.1.0",
 "vite": "^4.2.0"
 }
}
</boltAction>
<boltAction type="file" filePath="index.html">
...
</boltAction>
<boltAction type="file" filePath="src/main.jsx">
...
</boltAction>
<boltAction type="file" filePath="src/index.css">
...
</boltAction>
<boltAction type="file" filePath="src/App.jsx">
...
</boltAction>
<boltAction type="start">
npm run dev
</boltAction>
</boltArtifact>
You can now view the bouncing ball animation in the preview. The ball will start falling from the top of the screen and bounce realistically when it hits the bottom.
</assistant_response>
</example>
</examples>
Always use artifacts for file contents and commands, following the format shown in these examples.
`;
};

View File

@@ -1,116 +1,17 @@
import { MODIFICATIONS_TAG_NAME, WORK_DIR } from '~/utils/constants';
import { allowedHTMLElements } from '~/utils/markdown';
import { stripIndents } from '~/utils/stripIndent';
export const getSystemPrompt = (cwd: string = WORK_DIR) => `
You are Bolt, an expert AI assistant and exceptional senior software developer with vast knowledge across multiple programming languages, frameworks, and best practices.
export const DeveloperSystemPrompt = `
You are Nut, an expert AI assistant and exceptional senior software developer with vast knowledge across multiple programming languages, frameworks, and best practices.
<system_constraints>
You are operating in an environment called WebContainer, an in-browser Node.js runtime that emulates a Linux system to some degree. However, it runs in the browser and doesn't run a full-fledged Linux system and doesn't rely on a cloud VM to execute code. All code is executed in the browser. It does come with a shell that emulates zsh. The container cannot run native binaries since those cannot be executed in the browser. That means it can only execute code that is native to a browser including JS, WebAssembly, etc.
The shell comes with \`python\` and \`python3\` binaries, but they are LIMITED TO THE PYTHON STANDARD LIBRARY ONLY This means:
- There is NO \`pip\` support! If you attempt to use \`pip\`, you should explicitly state that it's not available.
- CRITICAL: Third-party libraries cannot be installed or imported.
- Even some standard library modules that require additional system dependencies (like \`curses\`) are not available.
- Only modules from the core Python standard library can be used.
Additionally, there is no \`g++\` or any C/C++ compiler available. WebContainer CANNOT run native binaries or compile C/C++ code!
Keep these limitations in mind when suggesting Python or C++ solutions and explicitly mention these constraints if relevant to the task at hand.
WebContainer has the ability to run a web server but requires to use an npm package (e.g., Vite, servor, serve, http-server) or use the Node.js APIs to implement a web server.
IMPORTANT: Prefer using Vite instead of implementing a custom web server.
IMPORTANT: Git is NOT available.
IMPORTANT: Prefer writing Node.js scripts instead of shell scripts. The environment doesn't fully support shell scripts, so use Node.js for scripting tasks whenever possible!
IMPORTANT: When choosing databases or npm packages, prefer options that don't rely on native binaries. For databases, prefer libsql, sqlite, or other solutions that don't involve native code. WebContainer CANNOT execute arbitrary native binaries.
Available shell commands:
File Operations:
- cat: Display file contents
- cp: Copy files/directories
- ls: List directory contents
- mkdir: Create directory
- mv: Move/rename files
- rm: Remove files
- rmdir: Remove empty directories
- touch: Create empty file/update timestamp
System Information:
- hostname: Show system name
- ps: Display running processes
- pwd: Print working directory
- uptime: Show system uptime
- env: Environment variables
Development Tools:
- node: Execute Node.js code
- python3: Run Python scripts
- code: VSCode operations
- jq: Process JSON
Other Utilities:
- curl, head, sort, tail, clear, which, export, chmod, scho, hostname, kill, ln, xxd, alias, false, getconf, true, loadenv, wasm, xdg-open, command, exit, source
</system_constraints>
For all designs you produce, make them beautiful and modern.
<code_formatting_info>
Use 2 spaces for code indentation
</code_formatting_info>
<message_formatting_info>
You can make the output pretty by using only the following available HTML elements: ${allowedHTMLElements.map((tagName) => `<${tagName}>`).join(', ')}
</message_formatting_info>
<diff_spec>
For user-made file modifications, a \`<${MODIFICATIONS_TAG_NAME}>\` section will appear at the start of the user message. It will contain either \`<diff>\` or \`<file>\` elements for each modified file:
- \`<diff path="/some/file/path.ext">\`: Contains GNU unified diff format changes
- \`<file path="/some/file/path.ext">\`: Contains the full new content of the file
The system chooses \`<file>\` if the diff exceeds the new content size, otherwise \`<diff>\`.
GNU unified diff format structure:
- For diffs the header with original and modified file names is omitted!
- Changed sections start with @@ -X,Y +A,B @@ where:
- X: Original file starting line
- Y: Original file line count
- A: Modified file starting line
- B: Modified file line count
- (-) lines: Removed from original
- (+) lines: Added in modified version
- Unmarked lines: Unchanged context
Example:
<${MODIFICATIONS_TAG_NAME}>
<diff path="${WORK_DIR}/src/main.js">
@@ -2,7 +2,10 @@
return a + b;
}
-console.log('Hello, World!');
+console.log('Hello, Bolt!');
+
function greet() {
- return 'Greetings!';
+ return 'Greetings!!';
}
+
+console.log('The End');
</diff>
<file path="${WORK_DIR}/package.json">
// full file content here
</file>
</${MODIFICATIONS_TAG_NAME}>
</diff_spec>
<chain_of_thought_instructions>
Before providing a solution, BRIEFLY outline your implementation steps. This helps ensure systematic thinking and clear communication. Your planning should:
Before providing a solution, BRIEFLY outline your implementation steps.
This helps ensure systematic thinking and clear communication. Your planning should:
- List concrete steps you'll take
- Identify key components needed
- Note potential challenges
@@ -139,204 +40,63 @@ You are Bolt, an expert AI assistant and exceptional senior software developer w
</chain_of_thought_instructions>
<artifact_info>
Bolt creates a SINGLE, comprehensive artifact for each project. The artifact contains all necessary steps and components, including:
- Shell commands to run including dependencies to install using a package manager (NPM)
- Files to create and their contents
- Folders to create if necessary
<artifact_instructions>
1. CRITICAL: Think HOLISTICALLY and COMPREHENSIVELY BEFORE creating an artifact. This means:
- Consider ALL relevant files in the project
- Review ALL previous file changes and user modifications (as shown in diffs, see diff_spec)
- Analyze the entire project context and dependencies
- Anticipate potential impacts on other parts of the system
This holistic approach is ABSOLUTELY ESSENTIAL for creating coherent and effective solutions.
2. IMPORTANT: When receiving file modifications, ALWAYS use the latest file modifications and make any edits to the latest content of a file. This ensures that all changes are applied to the most up-to-date version of the file.
3. The current working directory is \`${cwd}\`.
4. Wrap the content in opening and closing \`<boltArtifact>\` tags. These tags contain more specific \`<boltAction>\` elements.
5. Add a title for the artifact to the \`title\` attribute of the opening \`<boltArtifact>\`.
6. Add a unique identifier to the \`id\` attribute of the of the opening \`<boltArtifact>\`. For updates, reuse the prior identifier. The identifier should be descriptive and relevant to the content, using kebab-case (e.g., "example-code-snippet"). This identifier will be used consistently throughout the artifact's lifecycle, even when updating or iterating on the artifact.
7. Use \`<boltAction>\` tags to define specific actions to perform.
8. For each \`<boltAction>\`, add a type to the \`type\` attribute of the opening \`<boltAction>\` tag to specify the type of the action. Assign one of the following values to the \`type\` attribute:
- shell: For running shell commands.
- When Using \`npx\`, ALWAYS provide the \`--yes\` flag.
- When running multiple shell commands, use \`&&\` to run them sequentially.
- ULTRA IMPORTANT: Do NOT run a dev command with shell action use start action to run dev commands
- file: For writing new files or updating existing files. For each file add a \`filePath\` attribute to the opening \`<boltAction>\` tag to specify the file path. The content of the file artifact is the file contents. All file paths MUST BE relative to the current working directory.
- start: For starting a development server.
- Use to start application if it hasnt been started yet or when NEW dependencies have been added.
- Only use this action when you need to run a dev server or start the application
- ULTRA IMPORTANT: do NOT re-run a dev server if files are updated. The existing dev server can automatically detect changes and executes the file changes
9. The order of the actions is VERY IMPORTANT. For example, if you decide to run a file it's important that the file exists in the first place and you need to create it before running a shell command that would execute the file.
10. ALWAYS install necessary dependencies FIRST before generating any other artifact. If that requires a \`package.json\` then you should create that first!
IMPORTANT: Add all required dependencies to the \`package.json\` already and try to avoid \`npm i <pkg>\` if possible!
11. CRITICAL: Always provide the FULL, updated content of the artifact. This means:
- Include ALL code, even if parts are unchanged
- NEVER use placeholders like "// rest of the code remains the same..." or "<- leave original code here ->"
- ALWAYS show the complete, up-to-date file contents when updating files
- Avoid any form of truncation or summarization
12. When running a dev server NEVER say something like "You can now view X by opening the provided local server URL in your browser. The preview will be opened automatically or by the user manually!
13. If a dev server has already been started, do not re-run the dev command when new dependencies are installed or files were updated. Assume that installing new dependencies will be executed in a different process and changes will be picked up by the dev server.
14. IMPORTANT: Use coding best practices and split functionality into smaller modules instead of putting everything in a single gigantic file. Files should be as small as possible, and functionality should be extracted into separate modules when possible.
- Ensure code is clean, readable, and maintainable.
- Adhere to proper naming conventions and consistent formatting.
- Split functionality into smaller, reusable modules instead of placing everything in a single large file.
- Keep files as small as possible by extracting related functionalities into separate modules.
- Use imports to connect these modules together effectively.
</artifact_instructions>
</artifact_info>
NEVER use the word "artifact". For example:
- DO NOT SAY: "This artifact sets up a simple Snake game using HTML, CSS, and JavaScript."
- INSTEAD SAY: "We set up a simple Snake game using HTML, CSS, and JavaScript."
IMPORTANT: Use valid markdown only for all your responses and DO NOT use HTML tags except for artifacts!
IMPORTANT: Use valid markdown only for all your responses and DO NOT use HTML tags!
ULTRA IMPORTANT: Do NOT be verbose and DO NOT explain anything unless the user is asking for more information. That is VERY important.
ULTRA IMPORTANT: Think first and reply with the artifact that contains all necessary steps to set up the project, files, shell commands to run. It is SUPER IMPORTANT to respond with this first.
ULTRA IMPORTANT: Think first and reply with all the files needed to set up the project and get it running.
It is SUPER IMPORTANT to respond with this first. Create every needed file.
Here are some examples of correct usage of artifacts:
<example>
<user_query>Make a bouncing ball with real gravity using React</user_query>
<examples>
<example>
<user_query>Can you help me create a JavaScript function to calculate the factorial of a number?</user_query>
<assistant_response>
Certainly! I'll create a bouncing ball with real gravity using React. We'll use the react-spring library for physics-based animations.
<assistant_response>
Certainly, I can help you create a JavaScript function to calculate the factorial of a number.
<file path="package.json">
{
"name": "bouncing-ball",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
},
"dependencies": {
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-spring": "^9.7.1"
},
"devDependencies": {
"@types/react": "^18.0.28",
"@types/react-dom": "^18.0.11",
"@vitejs/plugin-react": "^3.1.0",
"vite": "^4.2.0"
}
}
</file>
<boltArtifact id="factorial-function" title="JavaScript Factorial Function">
<boltAction type="file" filePath="index.js">
function factorial(n) {
...
}
<file path="index.html">
...
</file>
...
</boltAction>
<file path="src/main.jsx">
...
</file>
<boltAction type="shell">
node index.js
</boltAction>
</boltArtifact>
</assistant_response>
</example>
<file path="src/index.css">
...
</file>
<example>
<user_query>Build a snake game</user_query>
<file path="src/App.jsx">
...
</file>
<assistant_response>
Certainly! I'd be happy to help you build a snake game using JavaScript and HTML5 Canvas. This will be a basic implementation that you can later expand upon. Let's create the game step by step.
<boltArtifact id="snake-game" title="Snake Game in HTML and JavaScript">
<boltAction type="file" filePath="package.json">
{
"name": "snake",
"scripts": {
"dev": "vite"
}
...
}
</boltAction>
<boltAction type="shell">
npm install --save-dev vite
</boltAction>
<boltAction type="file" filePath="index.html">
...
</boltAction>
<boltAction type="start">
npm run dev
</boltAction>
</boltArtifact>
Now you can play the Snake game by opening the provided local server URL in your browser. Use the arrow keys to control the snake. Eat the red food to grow and increase your score. The game ends if you hit the wall or your own tail.
</assistant_response>
</example>
<example>
<user_query>Make a bouncing ball with real gravity using React</user_query>
<assistant_response>
Certainly! I'll create a bouncing ball with real gravity using React. We'll use the react-spring library for physics-based animations.
<boltArtifact id="bouncing-ball-react" title="Bouncing Ball with Gravity in React">
<boltAction type="file" filePath="package.json">
{
"name": "bouncing-ball",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"preview": "vite preview"
},
"dependencies": {
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-spring": "^9.7.1"
},
"devDependencies": {
"@types/react": "^18.0.28",
"@types/react-dom": "^18.0.11",
"@vitejs/plugin-react": "^3.1.0",
"vite": "^4.2.0"
}
}
</boltAction>
<boltAction type="file" filePath="index.html">
...
</boltAction>
<boltAction type="file" filePath="src/main.jsx">
...
</boltAction>
<boltAction type="file" filePath="src/index.css">
...
</boltAction>
<boltAction type="file" filePath="src/App.jsx">
...
</boltAction>
<boltAction type="start">
npm run dev
</boltAction>
</boltArtifact>
You can now view the bouncing ball animation in the preview. The ball will start falling from the top of the screen and bounce realistically when it hits the bottom.
</assistant_response>
</example>
</examples>
You can now view the bouncing ball animation in the preview. The ball will start falling from the top of the screen and bounce realistically when it hits the bottom.
</assistant_response>
</example>
`;
export const CONTINUE_PROMPT = stripIndents`

View File

@@ -1,15 +0,0 @@
import type { Message } from 'ai';
/**
 * Ask the backend whether the next chat message should run against a
 * recording simulation.
 * NOTE: the `messages` parameter is currently unused by the request body.
 */
export async function shouldUseSimulation(messages: Message[], messageInput: string) {
  const requestBody: any = { messageInput };

  const response = await fetch('/api/use-simulation', {
    method: 'POST',
    body: JSON.stringify(requestBody),
  });

  const result = (await response.json()) as any;

  // Only trust an explicit truthy `useSimulation` flag in the reply.
  return "useSimulation" in result && !!result.useSimulation;
}

View File

@@ -7,6 +7,10 @@ export function assert(condition: any, message: string = "Assertion failed!"): a
}
}
/** Produce a short (up to 8 characters) random lowercase-hex identifier. */
export function generateRandomId() {
  // Math.random() renders as "0.<hex digits>" in base 16; take up to 8
  // digits after the "0." prefix.
  const fraction = Math.random().toString(16);
  return fraction.substring(2, 10);
}
export function defer<T>(): { promise: Promise<T>; resolve: (value: T) => void; reject: (reason?: any) => void } {
let resolve: (value: T) => void;
let reject: (reason?: any) => void;

View File

@@ -4,8 +4,12 @@
import type { Message } from 'ai';
import type { SimulationData, SimulationPacket } from './SimulationData';
import { SimulationDataVersion } from './SimulationData';
import { assert, ProtocolClient } from './ReplayProtocolClient';
import { assert, generateRandomId, ProtocolClient } from './ReplayProtocolClient';
import type { MouseData } from './Recording';
import type { FileMap } from '../stores/files';
import { shouldIncludeFile } from '~/utils/fileUtils';
import { DeveloperSystemPrompt } from '../common/prompts/prompts';
import { detectProjectCommands } from '~/utils/projectCommands';
function createRepositoryContentsPacket(contents: string): SimulationPacket {
return {
@@ -15,12 +19,36 @@ function createRepositoryContentsPacket(contents: string): SimulationPacket {
};
}
export type ProtocolMessage = {
role: "user" | "assistant" | "system";
type ProtocolMessageRole = "user" | "assistant" | "system";
type ProtocolMessageText = {
type: "text";
role: ProtocolMessageRole;
content: string;
};
type ProtocolMessageImage = {
type: "image";
role: ProtocolMessageRole;
dataURL: string;
};
export type ProtocolMessage = ProtocolMessageText | ProtocolMessageImage;
// A file transferred over the Nut protocol. `base64` marks binary payloads.
export type ProtocolFile = {
  path: string;
  content: string;
  base64?: boolean;
};

// Invoked with each incremental chunk of the streamed chat response.
type ChatResponsePartCallback = (response: string) => void;

// Optional behaviors for ChatManager.sendChatMessage:
// chatOnly suppresses file modifications; developerFiles supplies extra
// files for the developer chat; onResponsePart streams partial responses.
interface ChatMessageOptions {
  chatOnly?: boolean;
  developerFiles?: ProtocolFile[];
  onResponsePart?: ChatResponsePartCallback;
}
class ChatManager {
// Empty if this chat has been destroyed.
client: ProtocolClient | undefined;
@@ -121,25 +149,63 @@ class ChatManager {
return allData;
}
async sendChatMessage(messages: ProtocolMessage[]) {
async sendChatMessage(messages: ProtocolMessage[], options?: ChatMessageOptions) {
assert(this.client, "Chat has been destroyed");
const responseId = `response-${generateRandomId()}`;
let response: string = "";
this.client.listenForMessage("Nut.chatResponsePart", ({ message }: { message: ProtocolMessage }) => {
console.log("ChatResponsePart", message);
response += message.content;
const removeResponseListener = this.client.listenForMessage("Nut.chatResponsePart", ({ responseId: eventResponseId, message }: { responseId: string, message: ProtocolMessage }) => {
if (responseId == eventResponseId) {
if (message.type == "text") {
response += message.content;
options?.onResponsePart?.(message.content);
}
}
});
const modifiedFiles: ProtocolFile[] = [];
const removeFileListener = this.client.listenForMessage("Nut.chatModifiedFile", ({ responseId: eventResponseId, file }: { responseId: string, file: ProtocolFile }) => {
if (responseId == eventResponseId) {
console.log("ChatModifiedFile", file);
modifiedFiles.push(file);
const content = `
<boltArtifact id="modified-file-${generateRandomId()}" title="File Changes">
<boltAction type="file" filePath="${file.path}">${file.content}</boltAction>
</boltArtifact>
`;
response += content;
options?.onResponsePart?.(content);
}
});
const responseId = "<response-id>";
const chatId = await this.chatIdPromise;
console.log("ChatSendMessage", new Date().toISOString(), chatId, JSON.stringify(messages));
console.log("ChatSendMessage", new Date().toISOString(), chatId, JSON.stringify({ messages, developerFiles: options?.developerFiles }));
await this.client.sendCommand({
method: "Nut.sendChatMessage",
params: { chatId, responseId, messages },
params: { chatId, responseId, messages, chatOnly: options?.chatOnly, developerFiles: options?.developerFiles },
});
removeResponseListener();
removeFileListener();
if (modifiedFiles.length) {
const commands = await detectProjectCommands(modifiedFiles);
const content = `
<boltArtifact id="project-setup" title="Project Setup">
<boltAction type="shell">${commands.setupCommand}</boltAction>
</boltArtifact>
`;
response += content;
options?.onResponsePart?.(content);
}
return response;
}
}
@@ -208,7 +274,7 @@ export function getLastSimulationChatMessages(): ProtocolMessage[] | undefined {
return gLastSimulationChatMessages;
}
const SystemPrompt = `
const SimulationSystemPrompt = `
The following user message describes a bug or other problem on the page which needs to be fixed.
You must respond with a useful explanation that will help the user understand the source of the problem.
Do not describe the specific fix needed.
@@ -222,7 +288,7 @@ export async function getSimulationEnhancedPrompt(
assert(gChatManager, "Chat not started");
assert(gChatManager.simulationFinished, "Simulation not finished");
let system = SystemPrompt;
let system = SimulationSystemPrompt;
if (mouseData) {
system += `The user pointed to an element on the page <element selector=${JSON.stringify(mouseData.selector)} height=${mouseData.height} width=${mouseData.width} x=${mouseData.x} y=${mouseData.y} />`;
}
@@ -242,5 +308,145 @@ export async function getSimulationEnhancedPrompt(
gLastSimulationChatMessages = messages;
return gChatManager.sendChatMessage(messages);
return await gChatManager.sendChatMessage(messages);
}
// Ask the model whether the user's message warrants a detailed simulation
// analysis of the application before answering. Returns true only when the
// model explicitly replies with <analyze>true</analyze>; any other response
// (or a missing tag) disables the analysis.
export async function shouldUseSimulation(messageInput: string) {
  if (!gChatManager) {
    gChatManager = new ChatManager();
  }

  const systemPrompt = `
You are a helpful assistant that determines whether a user's message that is asking an AI
to make a change to an application should first perform a detailed analysis of the application's
behavior to generate a better answer.
This is most helpful when the user is asking the AI to fix a problem with the application.
When making straightforward improvements to the application a detailed analysis is not necessary.
The text of the user's message will be wrapped in \`<user_message>\` tags. You must describe your
reasoning and then respond with either \`<analyze>true</analyze>\` or \`<analyze>false</analyze>\`.
`;

  const userMessage = `
Here is the user message you need to evaluate: <user_message>${messageInput}</user_message>
`;

  const messages: ProtocolMessage[] = [
    { role: "system", type: "text", content: systemPrompt },
    { role: "user", type: "text", content: userMessage },
  ];

  // chatOnly: we want a plain text verdict, not repository modifications.
  const response = await gChatManager.sendChatMessage(messages, { chatOnly: true });
  console.log("UseSimulationResponse", response);

  return /<analyze>(.*?)<\/analyze>/.exec(response)?.[1] === "true";
}
// Map an ai-sdk message role onto the protocol's role vocabulary.
// "data" messages are treated as assistant output.
function getProtocolRule(message: Message): "user" | "assistant" | "system" {
  const roleMapping: Record<string, "user" | "assistant" | "system"> = {
    user: "user",
    assistant: "assistant",
    data: "assistant",
    system: "system",
  };
  return roleMapping[message.role];
}
// Strip every <boltArtifact ...>...</boltArtifact> span from the text.
// An unterminated artifact (an open tag with no matching close tag) is
// removed along with everything that follows it.
function removeBoltArtifacts(text: string): string {
  const OPEN = "<boltArtifact";
  const CLOSE = "</boltArtifact>";

  let result = text;
  for (let start = result.indexOf(OPEN); start !== -1; start = result.indexOf(OPEN)) {
    const end = result.indexOf(CLOSE, start + OPEN.length);
    if (end === -1) {
      // No close tag: drop the tail starting at the open tag.
      result = result.substring(0, start);
    } else {
      // Splice out the artifact, including both tags.
      result = result.substring(0, start) + result.substring(end + CLOSE.length);
    }
  }
  return result;
}
// Convert ai-sdk chat messages into the flat protocol message format.
// Array content expands into one protocol message per text/image part, with
// bolt artifacts stripped from text parts; plain string content is passed
// through unchanged. Unrecognized parts are logged and dropped.
function buildProtocolMessages(messages: Message[]): ProtocolMessage[] {
  const result: ProtocolMessage[] = [];
  for (const message of messages) {
    const role = getProtocolRule(message);
    const { content } = message;

    if (Array.isArray(content)) {
      for (const part of content) {
        if (part.type === "text") {
          result.push({ role, type: "text", content: removeBoltArtifacts(part.text) });
        } else if (part.type === "image") {
          result.push({ role, type: "image", dataURL: part.image });
        } else {
          console.error("Unknown message content", part);
        }
      }
      continue;
    }

    if (typeof content == "string") {
      result.push({ role, type: "text", content });
    }
  }
  return result;
}
export async function sendDeveloperChatMessage(messages: Message[], files: FileMap, onResponsePart: ChatResponsePartCallback) {
if (!gChatManager) {
gChatManager = new ChatManager();
}
const developerFiles: ProtocolFile[] = [];
for (const [path, file] of Object.entries(files)) {
if (file?.type == "file" && shouldIncludeFile(path)) {
developerFiles.push({
path,
content: file.content,
});
}
}
const protocolMessages = buildProtocolMessages(messages);
protocolMessages.unshift({
role: "system",
type: "text",
content: DeveloperSystemPrompt,
});
return gChatManager.sendChatMessage(protocolMessages, { chatOnly: true, developerFiles, onResponsePart });
}

View File

@@ -6,7 +6,7 @@ import { createScopedLogger } from '~/utils/logger';
import { unreachable } from '~/utils/unreachable';
import type { ActionCallbackData } from './message-parser';
import type { BoltShell } from '~/utils/shell';
import { resetChatFileWritten } from '~/components/chat/Chat.client';
import { onRepositoryFileWritten } from '~/components/chat/Chat.client';
const logger = createScopedLogger('ActionRunner');
@@ -295,7 +295,7 @@ export class ActionRunner {
try {
await webcontainer.fs.writeFile(relativePath, action.content);
resetChatFileWritten();
onRepositoryFileWritten();
logger.debug(`File written ${relativePath}`);
} catch (error) {
logger.error('Failed to write file\n\n', error);

View File

@@ -236,9 +236,8 @@ export class StreamingMessageParser {
this._options.callbacks?.onArtifactOpen?.({ messageId, ...currentArtifact });
const artifactFactory = this._options.artifactElement ?? createArtifactElement;
output += artifactFactory({ messageId });
//const artifactFactory = this._options.artifactElement ?? createArtifactElement;
//output += artifactFactory({ messageId });
i = openTagEnd + 1;
} else {

View File

@@ -53,6 +53,9 @@ export class WorkbenchStore {
modifiedFiles = new Set<string>();
artifactIdList: string[] = [];
#globalExecutionQueue = Promise.resolve();
private fileMap: FileMap = {};
constructor() {
if (import.meta.hot) {
import.meta.hot.data.artifacts = this.artifacts;
@@ -339,6 +342,12 @@ export class WorkbenchStore {
const wc = await webcontainer;
const fullPath = nodePath.join(wc.workdir, data.action.filePath);
this.fileMap[fullPath] = {
type: 'file',
content: data.action.content,
isBinary: false,
};
if (this.selectedFile.value !== fullPath) {
this.setSelectedFile(fullPath);
}
@@ -375,7 +384,7 @@ export class WorkbenchStore {
private async generateZip() {
const zip = new JSZip();
const files = this.files.get();
const files = this.fileMap;
// Get the project name from the description input, or use a default name
const projectName = (description.value ?? 'project').toLocaleLowerCase().split(' ').join('_');

View File

@@ -1,103 +0,0 @@
import { type ActionFunctionArgs } from '@remix-run/cloudflare';
import { ChatStreamController } from '~/utils/chatStreamController';
import { assert } from '~/lib/replay/ReplayProtocolClient';
import { getStreamTextArguments, type FileMap, type Messages } from '~/lib/.server/llm/stream-text';
import { chatAnthropic, type ChatState } from '~/lib/.server/llm/chat-anthropic';
import { ensureOpenTelemetryInitialized } from '~/lib/.server/otel-wrapper';
// Remix action entry point for the chat route; delegates to chatAction.
export async function action(args: ActionFunctionArgs) {
  return chatAction(args);
}
// Directions given to the LLM when we have an enhanced prompt describing the bug to fix.
const EnhancedPromptPrefix = `
ULTRA IMPORTANT: Below is a detailed description of the bug.
Focus specifically on fixing this bug. Do not guess about other problems.
`;
// Remix action implementing the chat endpoint: assembles the model arguments,
// optionally appends the simulation-enhanced prompt to the final user
// message, and streams the Anthropic response back to the client.
async function chatAction({ context, request }: ActionFunctionArgs) {
  ensureOpenTelemetryInitialized(context);

  const { messages, files, promptId, simulationEnhancedPrompt, anthropicApiKey: clientAnthropicApiKey, loginKey } = await request.json<{
    messages: Messages;
    files: FileMap;
    promptId?: string;
    simulationEnhancedPrompt?: string;
    anthropicApiKey?: string;
    loginKey?: string;
  }>();

  // Keep the worker alive until the stream has fully finished writing.
  // 'finished' is assigned synchronously by the Promise executor.
  let finished!: (v?: any) => void;
  context.cloudflare.ctx.waitUntil(new Promise((resolve) => finished = resolve));

  console.log("SimulationEnhancedPrompt", simulationEnhancedPrompt);

  try {
    const { system, messages: coreMessages } = await getStreamTextArguments({
      messages,
      env: context.cloudflare.env,
      apiKeys: {},
      files,
      providerSettings: undefined,
      promptId,
    });

    // A key supplied by the client marks the chat as user-funded.
    const apiKey = clientAnthropicApiKey ?? context.cloudflare.env.ANTHROPIC_API_KEY;
    if (!apiKey) {
      throw new Error("Anthropic API key is not set");
    }

    const chatState: ChatState = {
      apiKey,
      isUser: !!clientAnthropicApiKey,
      userLoginKey: loginKey,
      infos: [],
    };

    const resultStream = new ReadableStream({
      async start(controller) {
        const chatController = new ChatStreamController(controller);

        // Inject the simulation analysis into the last user message so the
        // model focuses on the described bug.
        if (simulationEnhancedPrompt) {
          const lastMessage = coreMessages[coreMessages.length - 1];
          assert(lastMessage.role == "user", "Last message must be a user message");
          assert(lastMessage.content.length > 0, "Last message must have content");
          const lastContent = lastMessage.content[0];
          assert(typeof lastContent == "object" && lastContent.type == "text", "Last message content must be text");
          lastContent.text += `\n\n${EnhancedPromptPrefix}\n\n${simulationEnhancedPrompt}`;
        }

        try {
          await chatAnthropic(chatState, chatController, files, system, coreMessages);
        } catch (e) {
          // Surface the failure inside the chat stream rather than aborting it.
          console.error(e);
          chatController.writeText(`Error chatting with Anthropic: ${e}`);
        }

        controller.close();

        // Give background bookkeeping a moment before releasing waitUntil.
        setTimeout(finished, 1000);
      },
    });

    return new Response(resultStream, {
      status: 200,
      headers: {
        // Fixed: this was previously the invalid header key 'contentType',
        // which the platform would not treat as a Content-Type header.
        'Content-Type': 'text/plain; charset=utf-8',
      },
    });
  } catch (error: unknown) {
    console.error(error);

    // Match the narrowing style used by the other API routes in this app.
    if (error instanceof Error && error.message?.includes('API key')) {
      throw new Response('Invalid or missing API key', {
        status: 401,
        statusText: 'Unauthorized',
      });
    }

    throw new Response(null, {
      status: 500,
      statusText: 'Internal Server Error',
    });
  }
}

View File

@@ -1,131 +0,0 @@
import { type ActionFunctionArgs } from '@remix-run/cloudflare';
//import { StreamingTextResponse, parseStreamPart } from 'ai';
import { streamText } from '~/lib/.server/llm/stream-text';
import { stripIndents } from '~/utils/stripIndent';
import type { IProviderSetting, ProviderInfo } from '~/types/model';
// Remix action entry point for the prompt-enhancer route; delegates to enhancerAction.
export async function action(args: ActionFunctionArgs) {
  return enhancerAction(args);
}
// Parse a Cookie header into a name -> value map.
// Values are URI-decoded, and '=' characters inside a value are preserved.
// Items without an '=' separator are ignored.
function parseCookies(cookieHeader: string) {
  const cookies: Record<string, string> = {};

  // Split the cookie string by semicolons and spaces
  const items = cookieHeader.split(';').map((cookie) => cookie.trim());

  items.forEach((item) => {
    const [name, ...rest] = item.split('=');

    // 'rest' is always an array (truthy even when empty), so the original
    // `name && rest` check never filtered anything; require a real '='
    // separator by checking the array length instead.
    if (name && rest.length > 0) {
      // Decode the name and value, and join value parts in case it contains '='
      const decodedName = decodeURIComponent(name.trim());
      const decodedValue = decodeURIComponent(rest.join('=').trim());
      cookies[decodedName] = decodedValue;
    }
  });

  return cookies;
}
// Remix action: enhance a user prompt via the LLM.
// Reads the message/model/provider from the request body, pulls API keys and
// provider settings from cookies, and streams back ONLY the enhanced prompt
// text as an event stream.
async function enhancerAction({ context, request }: ActionFunctionArgs) {
  const { message, model, provider } = await request.json<{
    message: string;
    model: string;
    provider: ProviderInfo;
    apiKeys?: Record<string, string>;
  }>();

  const { name: providerName } = provider;

  // validate 'model' and 'provider' fields
  if (!model || typeof model !== 'string') {
    throw new Response('Invalid or missing model', {
      status: 400,
      statusText: 'Bad Request',
    });
  }

  if (!providerName || typeof providerName !== 'string') {
    throw new Response('Invalid or missing provider', {
      status: 400,
      statusText: 'Bad Request',
    });
  }

  const cookieHeader = request.headers.get('Cookie');

  // Parse the cookie's value (returns an object or null if no cookie exists)
  const apiKeys = JSON.parse(parseCookies(cookieHeader || '').apiKeys || '{}');
  const providerSettings: Record<string, IProviderSetting> = JSON.parse(
    parseCookies(cookieHeader || '').providers || '{}',
  );

  try {
    // The model/provider directives are prepended in the bracketed header
    // format the stream-text layer parses out of the message body.
    const result = await streamText({
      messages: [
        {
          role: 'user',
          content:
            `[Model: ${model}]\n\n[Provider: ${providerName}]\n\n` +
            stripIndents`
You are a professional prompt engineer specializing in crafting precise, effective prompts.
Your task is to enhance prompts by making them more specific, actionable, and effective.
I want you to improve the user prompt that is wrapped in \`<original_prompt>\` tags.
For valid prompts:
- Make instructions explicit and unambiguous
- Add relevant context and constraints
- Remove redundant information
- Maintain the core intent
- Ensure the prompt is self-contained
- Use professional language
For invalid or unclear prompts:
- Respond with clear, professional guidance
- Keep responses concise and actionable
- Maintain a helpful, constructive tone
- Focus on what the user should provide
- Use a standard template for consistency
IMPORTANT: Your response must ONLY contain the enhanced prompt text.
Do not include any explanations, metadata, or wrapper tags.
<original_prompt>
${message}
</original_prompt>
`,
        },
      ],
      env: context.cloudflare.env,
      apiKeys,
      providerSettings,
    });

    // NOTE(review): 'Text-Encoding' is not a standard HTTP header — looks
    // like Transfer-Encoding was intended; confirm before relying on it.
    return new Response(result.textStream, {
      status: 200,
      headers: {
        'Content-Type': 'text/event-stream',
        Connection: 'keep-alive',
        'Cache-Control': 'no-cache',
        'Text-Encoding': 'chunked',
      },
    });
  } catch (error: unknown) {
    console.log(error);

    if (error instanceof Error && error.message?.includes('API key')) {
      throw new Response('Invalid or missing API key', {
        status: 401,
        statusText: 'Unauthorized',
      });
    }

    throw new Response(null, {
      status: 500,
      statusText: 'Internal Server Error',
    });
  }
}

View File

@@ -1,163 +0,0 @@
import { type ActionFunctionArgs } from '@remix-run/cloudflare';
//import { StreamingTextResponse, parseStreamPart } from 'ai';
import { streamText } from '~/lib/.server/llm/stream-text';
import type { IProviderSetting, ProviderInfo } from '~/types/model';
import { generateText } from 'ai';
import { getModelList, PROVIDER_LIST } from '~/utils/constants';
import { MAX_TOKENS } from '~/lib/.server/llm/constants';
// Remix action entry point for the LLM-call route; delegates to llmCallAction.
export async function action(args: ActionFunctionArgs) {
  return llmCallAction(args);
}
// Parse a Cookie header into a name -> value map.
// Values are URI-decoded, and '=' characters inside a value are preserved.
// Items without an '=' separator are ignored.
function parseCookies(cookieHeader: string) {
  const cookies: Record<string, string> = {};

  // Split the cookie string by semicolons and spaces
  const items = cookieHeader.split(';').map((cookie) => cookie.trim());

  items.forEach((item) => {
    const [name, ...rest] = item.split('=');

    // 'rest' is always an array (truthy even when empty), so the original
    // `name && rest` check never filtered anything; require a real '='
    // separator by checking the array length instead.
    if (name && rest.length > 0) {
      // Decode the name and value, and join value parts in case it contains '='
      const decodedName = decodeURIComponent(name.trim());
      const decodedValue = decodeURIComponent(rest.join('=').trim());
      cookies[decodedName] = decodedValue;
    }
  });

  return cookies;
}
// Remix action: make a single LLM call, either streamed (plain text body)
// or buffered (the full generateText result as JSON), depending on the
// 'streamOutput' flag in the request.
async function llmCallAction({ context, request }: ActionFunctionArgs) {
  const { system, message, model, provider, streamOutput } = await request.json<{
    system: string;
    message: string;
    model: string;
    provider: ProviderInfo;
    streamOutput?: boolean;
  }>();

  const { name: providerName } = provider;

  // validate 'model' and 'provider' fields
  if (!model || typeof model !== 'string') {
    throw new Response('Invalid or missing model', {
      status: 400,
      statusText: 'Bad Request',
    });
  }

  if (!providerName || typeof providerName !== 'string') {
    throw new Response('Invalid or missing provider', {
      status: 400,
      statusText: 'Bad Request',
    });
  }

  const cookieHeader = request.headers.get('Cookie');

  // Parse the cookie's value (returns an object or null if no cookie exists)
  const apiKeys = JSON.parse(parseCookies(cookieHeader || '').apiKeys || '{}');
  const providerSettings: Record<string, IProviderSetting> = JSON.parse(
    parseCookies(cookieHeader || '').providers || '{}',
  );

  if (streamOutput) {
    // Streaming mode: forward the model's text stream directly to the client.
    try {
      const result = await streamText({
        options: {
          system,
        },
        messages: [
          {
            role: 'user',
            content: `${message}`,
          },
        ],
        env: context.cloudflare.env,
        apiKeys,
        providerSettings,
      });

      return new Response(result.textStream, {
        status: 200,
        headers: {
          'Content-Type': 'text/plain; charset=utf-8',
        },
      });
    } catch (error: unknown) {
      console.log(error);

      if (error instanceof Error && error.message?.includes('API key')) {
        throw new Response('Invalid or missing API key', {
          status: 401,
          statusText: 'Unauthorized',
        });
      }

      throw new Response(null, {
        status: 500,
        statusText: 'Internal Server Error',
      });
    }
  } else {
    // Buffered mode: resolve the model and provider, cap the token budget,
    // and return the complete generateText result as JSON.
    try {
      const MODEL_LIST = await getModelList({ apiKeys, providerSettings, serverEnv: context.cloudflare.env as any });
      const modelDetails = MODEL_LIST.find((m) => m.name === model);

      if (!modelDetails) {
        throw new Error('Model not found');
      }

      // Fall back to the global MAX_TOKENS when the model has no explicit cap.
      const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;

      const providerInfo = PROVIDER_LIST.find((p) => p.name === provider.name);

      if (!providerInfo) {
        throw new Error('Provider not found');
      }

      const result = await generateText({
        system,
        messages: [
          {
            role: 'user',
            content: `${message}`,
          },
        ],
        model: providerInfo.getModelInstance({
          model: modelDetails.name,
          serverEnv: context.cloudflare.env as any,
          apiKeys,
          providerSettings,
        }),
        maxTokens: dynamicMaxTokens,
        toolChoice: 'none',
      });

      return new Response(JSON.stringify(result), {
        status: 200,
        headers: {
          'Content-Type': 'application/json',
        },
      });
    } catch (error: unknown) {
      console.log(error);

      if (error instanceof Error && error.message?.includes('API key')) {
        throw new Response('Invalid or missing API key', {
          status: 401,
          statusText: 'Unauthorized',
        });
      }

      throw new Response(null, {
        status: 500,
        statusText: 'Internal Server Error',
      });
    }
  }
}

View File

@@ -1,54 +0,0 @@
import { json, type ActionFunctionArgs } from '@remix-run/cloudflare';
import { callAnthropic, type ChatState } from '~/lib/.server/llm/chat-anthropic';
import type { MessageParam } from '@anthropic-ai/sdk/resources/messages/messages.mjs';
// Remix action entry point for the use-simulation route; delegates to useSimulationAction.
export async function action(args: ActionFunctionArgs) {
  return useSimulationAction(args);
}
// Decide server-side whether a user message should trigger a detailed
// simulation analysis before the main chat response. Asks Anthropic and
// parses its <analyze>true|false</analyze> verdict, defaulting to false.
async function useSimulationAction({ context, request }: ActionFunctionArgs) {
  const { messageInput } = await request.json<{
    messageInput: string;
  }>();

  const apiKey = context.cloudflare.env.ANTHROPIC_API_KEY;
  if (!apiKey) {
    throw new Error("Anthropic API key is not set");
  }

  // This endpoint is always funded by the server-side key.
  const chatState: ChatState = {
    apiKey,
    isUser: false,
    infos: [],
  };

  const systemPrompt = `
You are a helpful assistant that determines whether a user's message that is asking an AI
to make a change to an application should first perform a detailed analysis of the application's
behavior to generate a better answer.
This is most helpful when the user is asking the AI to fix a problem with the application.
When making straightforward improvements to the application a detailed analysis is not necessary.
The text of the user's message will be wrapped in \`<user_message>\` tags. You must describe your
reasoning and then respond with either \`<analyze>true</analyze>\` or \`<analyze>false</analyze>\`.
`;

  const message: MessageParam = {
    role: "user",
    content: `Here is the user message you need to evaluate: <user_message>${messageInput}</user_message>`,
  };

  const { responseText } = await callAnthropic(chatState, "UseSimulation", systemPrompt, [message]);
  console.log("UseSimulationResponse", responseText);

  // Any response without an explicit <analyze>true</analyze> counts as false.
  const useSimulation = /<analyze>(.*?)<\/analyze>/.exec(responseText)?.[1] === "true";
  return json({ useSimulation });
}

View File

@@ -1,44 +0,0 @@
// Define the ChatStreamController class for writing messages to a readable
// stream which will be decoded by the react/ai Chat API. There does not seem
// to be functionality exported from the associated packages to do this so
// for now we do it manually after reverse engineering the protocol.
import type { ChatAnthropicInfo } from "~/lib/.server/llm/chat-anthropic";
export interface ChatFileChange {
filePath: string;
contents: string;
}
// Writes messages to a ReadableStream in the wire format decoded by the
// react/ai chat client. No helper for producing this format is exported by
// the associated packages, so the protocol prefixes ("0:" for text, "8:"
// for annotations) are emitted manually.
export class ChatStreamController {
  private controller: ReadableStreamDefaultController;
  private encoder: TextEncoder;

  constructor(controller: ReadableStreamDefaultController) {
    this.controller = controller;
    this.encoder = new TextEncoder();
  }

  // Emit a chunk of plain response text.
  writeText(text: string) {
    const data = this.encoder.encode(`0:${JSON.stringify(text)}\n`);
    this.controller.enqueue(data);
  }

  // Emit a set of file changes wrapped in a boltArtifact tag so the client
  // renders them as workbench file updates.
  writeFileChanges(title: string, fileChanges: ChatFileChange[]) {
    let text = `<boltArtifact title="${title}">`;
    for (const fileChange of fileChanges) {
      text += `<boltAction type="file" filePath="${fileChange.filePath}">${fileChange.contents}</boltAction>`;
    }
    text += "</boltArtifact>";
    this.writeText(text);
  }

  // Emit a message annotation. The whole payload is serialized with
  // JSON.stringify instead of being interpolated into a JSON template, so a
  // type containing quotes (or an undefined value) cannot corrupt the frame.
  writeAnnotation(type: string, value: any) {
    const data = this.encoder.encode(`8:${JSON.stringify([{ type, value }])}\n`);
    this.controller.enqueue(data);
  }

  // Convenience wrapper emitting the standard "usage" annotation.
  writeUsage({ chatInfo, completionTokens, promptTokens }: { chatInfo: ChatAnthropicInfo, completionTokens: number, promptTokens: number }) {
    this.writeAnnotation("usage", { chatInfo, completionTokens, promptTokens, totalTokens: completionTokens + promptTokens });
  }
}

View File

@@ -46,65 +46,3 @@ export const shouldIncludeFile = (path: string): boolean => {
}
return !ig.ignores(path);
};
// Locate the uploaded package.json (if any) and parse it.
// Returns null when no package.json is present or when reading/parsing
// fails; failures are logged rather than propagated.
const readPackageJson = async (files: File[]): Promise<{ scripts?: Record<string, string> } | null> => {
  const packageJson = files.find((file) => file.webkitRelativePath.endsWith('package.json'));
  if (!packageJson) {
    return null;
  }

  try {
    // FileReader only exposes a callback API, so wrap it in a promise.
    const text = await new Promise<string>((resolve, reject) => {
      const reader = new FileReader();
      reader.onerror = reject;
      reader.onload = () => resolve(reader.result as string);
      reader.readAsText(packageJson);
    });
    return JSON.parse(text);
  } catch (error) {
    console.error('Error reading package.json:', error);
    return null;
  }
};
// Infer how to set up and run an uploaded project.
// Node.js projects (package.json) prefer the first of the dev/start/preview
// scripts; static sites (index.html) are served with `npx serve`; anything
// else yields empty fields.
export const detectProjectType = async (
  files: File[],
): Promise<{ type: string; setupCommand: string; followupMessage: string }> => {
  const hasFile = (name: string) => files.some((f) => f.webkitRelativePath.endsWith(name));

  if (hasFile('package.json')) {
    const packageJson = await readPackageJson(files);
    const scripts = packageJson?.scripts || {};

    // Check for preferred commands in priority order
    const runCommand = ['dev', 'start', 'preview'].find((cmd) => scripts[cmd]);
    if (runCommand) {
      return {
        type: 'Node.js',
        setupCommand: `npm install && npm run ${runCommand}`,
        followupMessage: `Found "${runCommand}" script in package.json. Running "npm run ${runCommand}" after installation.`,
      };
    }

    return {
      type: 'Node.js',
      setupCommand: 'npm install',
      followupMessage:
        'Would you like me to inspect package.json to determine the available scripts for running this project?',
    };
  }

  if (hasFile('index.html')) {
    return {
      type: 'Static',
      setupCommand: 'npx --yes serve',
      followupMessage: '',
    };
  }

  return { type: '', setupCommand: '', followupMessage: '' };
};