+ content = content.replace(/<think>.*?<\/think>/s, '');
return { ...message, content };
}
diff --git a/app/lib/.server/llm/stream-text.ts b/app/lib/.server/llm/stream-text.ts
index 374610c7..29579c9f 100644
--- a/app/lib/.server/llm/stream-text.ts
+++ b/app/lib/.server/llm/stream-text.ts
@@ -7,7 +7,7 @@ import { PromptLibrary } from '~/lib/common/prompt-library';
import { allowedHTMLElements } from '~/utils/markdown';
import { LLMManager } from '~/lib/modules/llm/manager';
import { createScopedLogger } from '~/utils/logger';
-import { createFilesContext, extractPropertiesFromMessage, simplifyBoltActions } from './utils';
+import { createFilesContext, extractPropertiesFromMessage } from './utils';
import { getFilePaths } from './select-context';
export type Messages = Message[];
@@ -27,6 +27,7 @@ export async function streamText(props: {
contextOptimization?: boolean;
contextFiles?: FileMap;
summary?: string;
+ messageSliceId?: number;
}) {
const {
messages,
@@ -51,10 +52,8 @@ export async function streamText(props: {
return { ...message, content };
} else if (message.role == 'assistant') {
let content = message.content;
-
- if (contextOptimization) {
- content = simplifyBoltActions(content);
- }
+ content = content.replace(/<div class=\"__boltThought__\">.*?<\/div>/s, '');
+ content = content.replace(/<think>.*?<\/think>/s, '');
return { ...message, content };
}
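For reference, the cleanup this hunk applies to assistant messages reduces to a small pure function. A minimal sketch (the sample input is hypothetical; the regexes mirror the two added lines above):

```ts
// Sketch: strip hidden-reasoning markup from an assistant message before it
// is replayed to the model. The regexes are non-global, so only the first
// occurrence of each block is removed, matching the hunk above.
function stripReasoningArtifacts(content: string): string {
  content = content.replace(/<div class="__boltThought__">.*?<\/div>/s, '');
  content = content.replace(/<think>.*?<\/think>/s, '');

  return content;
}

// Hypothetical example:
// stripReasoningArtifacts('<think>plan...</think>Done.') === 'Done.'
```

The `/s` flag lets `.` cross newlines, so multi-line thought blocks are removed in a single match.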
@@ -110,7 +109,7 @@ Below are all the files present in the project:
${filePaths.join('\n')}
---
-Below is the context loaded into context buffer for you to have knowledge of and might need changes to fullfill current user request.
+Below is the artifact containing the context loaded into the context buffer for you to have knowledge of; it might need changes to fulfill the current user request.
CONTEXT BUFFER:
---
${codeContext}
@@ -126,10 +125,14 @@ ${props.summary}
---
`;
- const lastMessage = processedMessages.pop();
+ if (props.messageSliceId) {
+ processedMessages = processedMessages.slice(props.messageSliceId);
+ } else {
+ const lastMessage = processedMessages.pop();
- if (lastMessage) {
- processedMessages = [lastMessage];
+ if (lastMessage) {
+ processedMessages = [lastMessage];
+ }
}
}
}
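The new `messageSliceId` branch keeps a recent window of the conversation instead of collapsing it to the last message alone. A minimal sketch of the selection logic, assuming `processedMessages` is the already-cleaned array and the caller computes the index as in `api.chat.ts` below:

```ts
import type { Message } from 'ai';

// Sketch of the trimming added above: with a slice index, keep the tail of
// the conversation from that index on; otherwise fall back to keeping only
// the last message, as before.
function trimHistory(processedMessages: Message[], messageSliceId?: number): Message[] {
  if (messageSliceId) {
    return processedMessages.slice(messageSliceId);
  }

  const lastMessage = processedMessages.at(-1);

  return lastMessage ? [lastMessage] : [];
}
```

A slice id of `0` is falsy and falls through to the fallback, so chats of three or fewer messages keep only their latest message, matching the previous behaviour.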
diff --git a/app/lib/.server/llm/utils.ts b/app/lib/.server/llm/utils.ts
index 9aac0891..e019a929 100644
--- a/app/lib/.server/llm/utils.ts
+++ b/app/lib/.server/llm/utils.ts
@@ -82,10 +82,10 @@ export function createFilesContext(files: FileMap, useRelativePath?: boolean) {
filePath = path.replace('/home/project/', '');
}
- return `<boltAction type="file" filePath="${filePath}">\n${codeWithLinesNumbers}\n</boltAction>`;
+ return `<boltAction type="file" filePath="${filePath}">${codeWithLinesNumbers}</boltAction>`;
});
- return `${fileContexts.join('\n\n')}\n\n`;
+ return `<boltArtifact id="code-content" title="Code Content" >\n${fileContexts.join('\n')}\n</boltArtifact>`;
}
export function extractCurrentContext(messages: Message[]) {
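The net effect of the whitespace change is that per-file newline padding disappears and the joined list is wrapped once. A sketch of the resulting serialization, using hypothetical file entries:

```ts
// Hypothetical entries shaped like the ones createFilesContext produces.
const fileContexts = [
  '<boltAction type="file" filePath="src/a.ts">const a = 1;</boltAction>',
  '<boltAction type="file" filePath="src/b.ts">const b = 2;</boltAction>',
];

// One newline between entries and one around the whole block: fewer stray
// blank lines in the context buffer than the old per-file padding produced.
const codeContext = `<boltArtifact id="code-content" title="Code Content" >\n${fileContexts.join('\n')}\n</boltArtifact>`;
```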
diff --git a/app/routes/api.chat.ts b/app/routes/api.chat.ts
index 8564bafd..bbecdae5 100644
--- a/app/routes/api.chat.ts
+++ b/app/routes/api.chat.ts
@@ -10,6 +10,7 @@ import { getFilePaths, selectContext } from '~/lib/.server/llm/select-context';
import type { ContextAnnotation, ProgressAnnotation } from '~/types/context';
import { WORK_DIR } from '~/utils/constants';
import { createSummary } from '~/lib/.server/llm/create-summary';
+import { extractPropertiesFromMessage } from '~/lib/.server/llm/utils';
export async function action(args: ActionFunctionArgs) {
return chatAction(args);
@@ -70,15 +71,21 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
const filePaths = getFilePaths(files || {});
let filteredFiles: FileMap | undefined = undefined;
let summary: string | undefined = undefined;
+ let messageSliceId = 0;
+
+ if (messages.length > 3) {
+ messageSliceId = messages.length - 3;
+ }
if (filePaths.length > 0 && contextOptimization) {
- dataStream.writeData('HI ');
logger.debug('Generating Chat Summary');
- dataStream.writeMessageAnnotation({
+ dataStream.writeData({
type: 'progress',
- value: progressCounter++,
- message: 'Generating Chat Summary',
- } as ProgressAnnotation);
+ label: 'summary',
+ status: 'in-progress',
+ order: progressCounter++,
+ message: 'Analysing Request',
+ } satisfies ProgressAnnotation);
// Create a summary of the chat
console.log(`Messages count: ${messages.length}`);
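Two details recur in this and the following hunks: progress records move from `writeMessageAnnotation` to `writeData`, and the `as ProgressAnnotation` cast becomes `satisfies`, which fully type-checks the literal against the updated annotation type instead of asserting it. A minimal sketch of the `satisfies` usage:

```ts
import type { ProgressAnnotation } from '~/types/context';

// `satisfies` checks every property of the literal against ProgressAnnotation
// (a misspelled or missing field is a compile error) while the expression
// keeps its narrow literal type; an `as` cast can mask such mistakes.
const annotation = {
  type: 'progress',
  label: 'summary',
  status: 'in-progress',
  order: 1,
  message: 'Analysing Request',
} satisfies ProgressAnnotation;
```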
@@ -99,6 +106,13 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
}
},
});
+ dataStream.writeData({
+ type: 'progress',
+ label: 'summary',
+ status: 'complete',
+ order: progressCounter++,
+ message: 'Analysis Complete',
+ } satisfies ProgressAnnotation);
dataStream.writeMessageAnnotation({
type: 'chatSummary',
@@ -108,11 +122,13 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
// Update context buffer
logger.debug('Updating Context Buffer');
- dataStream.writeMessageAnnotation({
+ dataStream.writeData({
type: 'progress',
- value: progressCounter++,
- message: 'Updating Context Buffer',
- } as ProgressAnnotation);
+ label: 'context',
+ status: 'in-progress',
+ order: progressCounter++,
+ message: 'Determining Files to Read',
+ } satisfies ProgressAnnotation);
// Select context files
console.log(`Messages count: ${messages.length}`);
@@ -152,12 +168,15 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
}),
} as ContextAnnotation);
- dataStream.writeMessageAnnotation({
+ dataStream.writeData({
type: 'progress',
- value: progressCounter++,
- message: 'Context Buffer Updated',
- } as ProgressAnnotation);
- logger.debug('Context Buffer Updated');
+ label: 'context',
+ status: 'complete',
+ order: progressCounter++,
+ message: 'Code Files Selected',
+ } satisfies ProgressAnnotation);
+
+ // logger.debug('Code Files Selected');
}
// Stream the text
@@ -181,6 +200,13 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
totalTokens: cumulativeUsage.totalTokens,
},
});
+ dataStream.writeData({
+ type: 'progress',
+ label: 'response',
+ status: 'complete',
+ order: progressCounter++,
+ message: 'Response Generated',
+ } satisfies ProgressAnnotation);
await new Promise((resolve) => setTimeout(resolve, 0));
// stream.close();
@@ -195,8 +221,14 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
logger.info(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
+ const lastUserMessage = messages.filter((x) => x.role == 'user').slice(-1)[0];
+ const { model, provider } = extractPropertiesFromMessage(lastUserMessage);
messages.push({ id: generateId(), role: 'assistant', content });
- messages.push({ id: generateId(), role: 'user', content: CONTINUE_PROMPT });
+ messages.push({
+ id: generateId(),
+ role: 'user',
+ content: `[Model: ${model}]\n\n[Provider: ${provider}]\n\n${CONTINUE_PROMPT}`,
+ });
const result = await streamText({
messages,
@@ -207,6 +239,9 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
providerSettings,
promptId,
contextOptimization,
+ contextFiles: filteredFiles,
+ summary,
+ messageSliceId,
});
result.mergeIntoDataStream(dataStream);
@@ -226,6 +261,14 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
},
};
+ dataStream.writeData({
+ type: 'progress',
+ label: 'response',
+ status: 'in-progress',
+ order: progressCounter++,
+ message: 'Generating Response',
+ } satisfies ProgressAnnotation);
+
const result = await streamText({
messages,
env: context.cloudflare?.env,
@@ -237,6 +280,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
contextOptimization,
contextFiles: filteredFiles,
summary,
+ messageSliceId,
});
(async () => {
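When the response hits `MAX_TOKENS`, the continuation user message now re-attaches the `[Model: ...]` and `[Provider: ...]` headers so that `extractPropertiesFromMessage` inside `streamText` resolves the same model for the follow-up call, and the continuation also receives `contextFiles`, `summary`, and `messageSliceId` so the retry keeps the optimized context. A sketch of the message construction (the helper name is hypothetical; `CONTINUE_PROMPT` comes from `~/lib/.server/llm/constants`):

```ts
import { generateId } from 'ai';

// Sketch: build the continuation message added above. The headers are the
// same bracketed metadata the UI prepends to ordinary user messages.
function buildContinueMessage(model: string, provider: string, continuePrompt: string) {
  return {
    id: generateId(),
    role: 'user' as const,
    content: `[Model: ${model}]\n\n[Provider: ${provider}]\n\n${continuePrompt}`,
  };
}
```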
diff --git a/app/routes/api.llmcall.ts b/app/routes/api.llmcall.ts
index 5dd4c098..cf75e499 100644
--- a/app/routes/api.llmcall.ts
+++ b/app/routes/api.llmcall.ts
@@ -7,6 +7,7 @@ import { MAX_TOKENS } from '~/lib/.server/llm/constants';
import { LLMManager } from '~/lib/modules/llm/manager';
import type { ModelInfo } from '~/lib/modules/llm/types';
import { getApiKeysFromCookie, getProviderSettingsFromCookie } from '~/lib/api/cookies';
+import { createScopedLogger } from '~/utils/logger';
export async function action(args: ActionFunctionArgs) {
return llmCallAction(args);
@@ -21,6 +22,8 @@ async function getModelList(options: {
return llmManager.updateModelList(options);
}
+const logger = createScopedLogger('api.llmcall');
+
async function llmCallAction({ context, request }: ActionFunctionArgs) {
const { system, message, model, provider, streamOutput } = await request.json<{
system: string;
@@ -106,6 +109,8 @@ async function llmCallAction({ context, request }: ActionFunctionArgs) {
throw new Error('Provider not found');
}
+ logger.info(`Generating response Provider: ${provider.name}, Model: ${modelDetails.name}`);
+
const result = await generateText({
system,
messages: [
@@ -123,6 +128,7 @@ async function llmCallAction({ context, request }: ActionFunctionArgs) {
maxTokens: dynamicMaxTokens,
toolChoice: 'none',
});
+ logger.info(`Generated response`);
return new Response(JSON.stringify(result), {
status: 200,
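The llmcall route now shares the scoped-logger convention used elsewhere on the server, bracketing the `generateText` call with before/after records. A short sketch of the pattern (the wrapper function is hypothetical; `createScopedLogger` is the import added above):

```ts
import { createScopedLogger } from '~/utils/logger';

// Records are tagged with the 'api.llmcall' scope, so one route's activity
// is easy to isolate in mixed server logs.
const logger = createScopedLogger('api.llmcall');

// Hypothetical wrapper showing the bracketing added around generateText.
async function traced<T>(description: string, work: () => Promise<T>): Promise<T> {
  logger.info(`Generating response ${description}`);

  const result = await work();
  logger.info('Generated response');

  return result;
}
```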
diff --git a/app/types/context.ts b/app/types/context.ts
index 4d8ea02b..75c21db7 100644
--- a/app/types/context.ts
+++ b/app/types/context.ts
@@ -11,6 +11,8 @@ export type ContextAnnotation =
export type ProgressAnnotation = {
type: 'progress';
- value: number;
+ label: string;
+ status: 'in-progress' | 'complete';
+ order: number;
message: string;
};
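With `label`, `status`, and `order` replacing the bare `value`, a client can pair each stage's in-progress and complete records and render the stages in a stable order. A sketch of one way to fold the stream (illustrative only; no such reducer appears in the diff):

```ts
import type { ProgressAnnotation } from '~/types/context';

// Later records for the same label replace earlier ones (in-progress ->
// complete), and `order` keeps the stages sorted for display.
function foldProgress(records: ProgressAnnotation[]): ProgressAnnotation[] {
  const byLabel = new Map<string, ProgressAnnotation>();

  for (const record of records) {
    byLabel.set(record.label, record);
  }

  return [...byLabel.values()].sort((a, b) => a.order - b.order);
}
```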
diff --git a/app/utils/selectStarterTemplate.ts b/app/utils/selectStarterTemplate.ts
index 4a7536da..0bbb1892 100644
--- a/app/utils/selectStarterTemplate.ts
+++ b/app/utils/selectStarterTemplate.ts
@@ -59,6 +59,7 @@ Instructions:
5. If no perfect match exists, recommend the closest option
Important: Provide only the selection tags in your response, no additional text.
+MOST IMPORTANT: YOU DON'T HAVE TIME TO THINK, JUST START RESPONDING BASED ON A HUNCH
`;
const templates: Template[] = STARTER_TEMPLATES.filter((t) => !t.name.includes('shadcn'));