feat: enhanced Code Context and Project Summary Features (#1191)
Some checks are pending
Docker Publish / docker-build-publish (push) Waiting to run
Update Stable Branch / prepare-release (push) Waiting to run

* fix: docker prod env variable fix

* lint and typecheck

* removed hardcoded tag

* better summary generation

* improved  summary generation for context optimization

* remove think tags from the generation
This commit is contained in:
Anirban Kar 2025-01-29 15:37:20 +05:30 committed by GitHub
parent a199295ad8
commit 7016111906
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 416 additions and 86 deletions

View File

@ -1,23 +1,55 @@
import { memo } from 'react';
import { Markdown } from './Markdown';
import type { JSONValue } from 'ai';
import type { ProgressAnnotation } from '~/types/context';
import Popover from '~/components/ui/Popover';
import { workbenchStore } from '~/lib/stores/workbench';
import { WORK_DIR } from '~/utils/constants';
interface AssistantMessageProps {
content: string;
annotations?: JSONValue[];
}
/** Opens the given file in the workbench code view, normalizing its path first. */
function openArtifactInWorkbench(filePath: string) {
  const relativePath = normalizedFilePath(filePath);

  // Make sure the code view is visible before selecting the file.
  if (workbenchStore.currentView.get() !== 'code') {
    workbenchStore.currentView.set('code');
  }

  workbenchStore.setSelectedFile(`${WORK_DIR}/${relativePath}`);
}
/**
 * Strips the workbench working-directory prefix and any leading slash from a
 * file path, yielding a path relative to the project root.
 */
function normalizedFilePath(path: string) {
  let normalizedPath = path;

  /*
   * Slice off the verified prefix exactly. The original used
   * `path.replace(WORK_DIR, '')`, which (a) read the parameter instead of the
   * accumulator and (b) would remove the first occurrence of WORK_DIR anywhere
   * in the string rather than only the prefix we just checked for.
   */
  if (normalizedPath.startsWith(WORK_DIR)) {
    normalizedPath = normalizedPath.slice(WORK_DIR.length);
  }

  if (normalizedPath.startsWith('/')) {
    normalizedPath = normalizedPath.slice(1);
  }

  return normalizedPath;
}
export const AssistantMessage = memo(({ content, annotations }: AssistantMessageProps) => {
const filteredAnnotations = (annotations?.filter(
(annotation: JSONValue) => annotation && typeof annotation === 'object' && Object.keys(annotation).includes('type'),
) || []) as { type: string; value: any } & { [key: string]: any }[];
let progressAnnotation: ProgressAnnotation[] = filteredAnnotations.filter(
(annotation) => annotation.type === 'progress',
) as ProgressAnnotation[];
progressAnnotation = progressAnnotation.sort((a, b) => b.value - a.value);
let chatSummary: string | undefined = undefined;
if (filteredAnnotations.find((annotation) => annotation.type === 'chatSummary')) {
chatSummary = filteredAnnotations.find((annotation) => annotation.type === 'chatSummary')?.summary;
}
let codeContext: string[] | undefined = undefined;
if (filteredAnnotations.find((annotation) => annotation.type === 'codeContext')) {
codeContext = filteredAnnotations.find((annotation) => annotation.type === 'codeContext')?.files;
}
const usage: {
completionTokens: number;
@ -29,8 +61,44 @@ export const AssistantMessage = memo(({ content, annotations }: AssistantMessage
<div className="overflow-hidden w-full">
<>
<div className=" flex gap-2 items-center text-sm text-bolt-elements-textSecondary mb-2">
{progressAnnotation.length > 0 && (
<Popover trigger={<div className="i-ph:info" />}>{progressAnnotation[0].message}</Popover>
{(codeContext || chatSummary) && (
<Popover side="right" align="start" trigger={<div className="i-ph:info" />}>
{chatSummary && (
<div className="max-w-chat">
<div className="summary max-h-96 flex flex-col">
<h2 className="border border-bolt-elements-borderColor rounded-md p4">Summary</h2>
<div style={{ zoom: 0.7 }} className="overflow-y-auto m4">
<Markdown>{chatSummary}</Markdown>
</div>
</div>
{codeContext && (
<div className="code-context flex flex-col p4 border border-bolt-elements-borderColor rounded-md">
<h2>Context</h2>
<div className="flex gap-4 mt-4 bolt" style={{ zoom: 0.6 }}>
{codeContext.map((x) => {
const normalized = normalizedFilePath(x);
return (
<>
<code
className="bg-bolt-elements-artifacts-inlineCode-background text-bolt-elements-artifacts-inlineCode-text px-1.5 py-1 rounded-md text-bolt-elements-item-contentAccent hover:underline cursor-pointer"
onClick={(e) => {
e.preventDefault();
e.stopPropagation();
openArtifactInWorkbench(normalized);
}}
>
{normalized}
</code>
</>
);
})}
</div>
</div>
)}
</div>
)}
<div className="context"></div>
</Popover>
)}
{usage && (
<div>

View File

@ -2,7 +2,7 @@
* @ts-nocheck
* Preventing TS checks with files presented in the video for a better presentation.
*/
import type { Message } from 'ai';
import type { JSONValue, Message } from 'ai';
import React, { type RefCallback, useEffect, useState } from 'react';
import { ClientOnly } from 'remix-utils/client-only';
import { Menu } from '~/components/sidebar/Menu.client';
@ -32,6 +32,8 @@ import StarterTemplates from './StarterTemplates';
import type { ActionAlert } from '~/types/actions';
import ChatAlert from './ChatAlert';
import type { ModelInfo } from '~/lib/modules/llm/types';
import ProgressCompilation from './ProgressCompilation';
import type { ProgressAnnotation } from '~/types/context';
const TEXTAREA_MIN_HEIGHT = 76;
@ -64,6 +66,7 @@ interface BaseChatProps {
setImageDataList?: (dataList: string[]) => void;
actionAlert?: ActionAlert;
clearAlert?: () => void;
data?: JSONValue[] | undefined;
}
export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
@ -97,6 +100,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
messages,
actionAlert,
clearAlert,
data,
},
ref,
) => {
@ -108,7 +112,15 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
const [recognition, setRecognition] = useState<SpeechRecognition | null>(null);
const [transcript, setTranscript] = useState('');
const [isModelLoading, setIsModelLoading] = useState<string | undefined>('all');
const [progressAnnotations, setProgressAnnotations] = useState<ProgressAnnotation[]>([]);
useEffect(() => {
if (data) {
const progressList = data.filter(
(x) => typeof x === 'object' && (x as any).type === 'progress',
) as ProgressAnnotation[];
setProgressAnnotations(progressList);
}
}, [data]);
useEffect(() => {
console.log(transcript);
}, [transcript]);
@ -307,6 +319,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
className={classNames('pt-6 px-2 sm:px-6', {
'h-full flex flex-col': chatStarted,
})}
ref={scrollRef}
>
<ClientOnly>
{() => {
@ -337,6 +350,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
/>
)}
</div>
{progressAnnotations && <ProgressCompilation data={progressAnnotations} />}
<div
className={classNames(
'bg-bolt-elements-background-depth-2 p-3 rounded-lg border border-bolt-elements-borderColor relative w-full max-w-chat mx-auto z-prompt',

View File

@ -137,36 +137,49 @@ export const ChatImpl = memo(
const [apiKeys, setApiKeys] = useState<Record<string, string>>({});
const { messages, isLoading, input, handleInputChange, setInput, stop, append, setMessages, reload, error } =
useChat({
api: '/api/chat',
body: {
apiKeys,
files,
promptId,
contextOptimization: contextOptimizationEnabled,
},
sendExtraMessageFields: true,
onError: (e) => {
logger.error('Request failed\n\n', e, error);
toast.error(
'There was an error processing your request: ' + (e.message ? e.message : 'No details were returned'),
);
},
onFinish: (message, response) => {
const usage = response.usage;
const {
messages,
isLoading,
input,
handleInputChange,
setInput,
stop,
append,
setMessages,
reload,
error,
data: chatData,
setData,
} = useChat({
api: '/api/chat',
body: {
apiKeys,
files,
promptId,
contextOptimization: contextOptimizationEnabled,
},
sendExtraMessageFields: true,
onError: (e) => {
logger.error('Request failed\n\n', e, error);
toast.error(
'There was an error processing your request: ' + (e.message ? e.message : 'No details were returned'),
);
},
onFinish: (message, response) => {
const usage = response.usage;
setData(undefined);
if (usage) {
console.log('Token usage:', usage);
if (usage) {
console.log('Token usage:', usage);
// You can now use the usage data as needed
}
// You can now use the usage data as needed
}
logger.debug('Finished streaming');
},
initialMessages,
initialInput: Cookies.get(PROMPT_COOKIE_KEY) || '',
});
logger.debug('Finished streaming');
},
initialMessages,
initialInput: Cookies.get(PROMPT_COOKIE_KEY) || '',
});
useEffect(() => {
const prompt = searchParams.get('prompt');
@ -535,6 +548,7 @@ export const ChatImpl = memo(
setImageDataList={setImageDataList}
actionAlert={actionAlert}
clearAlert={() => workbenchStore.clearAlert()}
data={chatData}
/>
);
},

View File

@ -23,8 +23,6 @@ export const Markdown = memo(({ children, html = false, limitedMarkdown = false
const components = useMemo(() => {
return {
div: ({ className, children, node, ...props }) => {
console.log(className, node);
if (className?.includes('__boltArtifact__')) {
const messageId = node?.properties.dataMessageId as string;

View File

@ -0,0 +1,111 @@
import { AnimatePresence, motion } from 'framer-motion';
import React, { useState } from 'react';
import type { ProgressAnnotation } from '~/types/context';
import { classNames } from '~/utils/classNames';
import { cubicEasingFn } from '~/utils/easings';
/**
 * Collapsible banner showing streaming progress annotations for the current
 * chat request. De-duplicates annotations by `label` (a completed step is
 * never downgraded by a later stale 'in-progress' update) and orders the
 * surviving entries by their `order` counter.
 */
export default function ProgressCompilation({ data }: { data?: ProgressAnnotation[] }) {
  // Use React.* consistently (the original mixed React.useState and useState).
  const [progressList, setProgressList] = React.useState<ProgressAnnotation[]>([]);
  const [expanded, setExpanded] = React.useState(false);

  React.useEffect(() => {
    if (!data || data.length === 0) {
      setProgressList([]);
      return;
    }

    // One entry per label; once a step is 'complete' it stays complete.
    const progressMap = new Map<string, ProgressAnnotation>();
    data.forEach((x) => {
      const existingProgress = progressMap.get(x.label);

      if (existingProgress && existingProgress.status === 'complete') {
        return;
      }

      progressMap.set(x.label, x);
    });

    const newData = Array.from(progressMap.values());
    newData.sort((a, b) => a.order - b.order);
    setProgressList(newData);
  }, [data]);

  if (progressList.length === 0) {
    return <></>;
  }

  return (
    <AnimatePresence>
      <div
        className={classNames(
          'bg-bolt-elements-background-depth-2',
          'border border-bolt-elements-borderColor',
          'shadow-lg rounded-lg relative w-full max-w-chat mx-auto z-prompt',
          'p-1',
        )}
        style={{ transform: 'translateY(1rem)' }}
      >
        <div
          className={classNames(
            'bg-bolt-elements-item-backgroundAccent',
            'p-1 rounded-lg text-bolt-elements-item-contentAccent',
            'flex',
          )}
        >
          <div className="flex-1">
            <AnimatePresence>
              {expanded ? (
                <motion.div
                  className="actions"
                  initial={{ height: 0 }}
                  animate={{ height: 'auto' }}
                  exit={{ height: '0px' }}
                  transition={{ duration: 0.15 }}
                >
                  {/* `label` is unique after the de-dupe above, so it is a
                      stable key (the original used the array index). */}
                  {progressList.map((x) => (
                    <ProgressItem key={x.label} progress={x} />
                  ))}
                </motion.div>
              ) : (
                // Collapsed view: show only the most recent step.
                <ProgressItem progress={progressList[progressList.length - 1]} />
              )}
            </AnimatePresence>
          </div>
          <motion.button
            initial={{ width: 0 }}
            animate={{ width: 'auto' }}
            exit={{ width: 0 }}
            transition={{ duration: 0.15, ease: cubicEasingFn }}
            className="p-1 rounded-lg bg-bolt-elements-item-backgroundAccent hover:bg-bolt-elements-artifacts-backgroundHover"
            onClick={() => setExpanded((v) => !v)}
          >
            <div className={expanded ? 'i-ph:caret-up-bold' : 'i-ph:caret-down-bold'}></div>
          </motion.button>
        </div>
      </div>
    </AnimatePresence>
  );
}
/** Single row in the progress list: a status icon followed by the message text. */
const ProgressItem = ({ progress }: { progress: ProgressAnnotation }) => {
  // Resolve the icon up front instead of inlining the nested ternary in JSX.
  const statusIcon =
    progress.status === 'in-progress' ? (
      <div className="i-svg-spinners:90-ring-with-bg"></div>
    ) : progress.status === 'complete' ? (
      <div className="i-ph:check"></div>
    ) : null;

  return (
    <motion.div
      className={classNames('flex text-sm gap-3')}
      initial={{ opacity: 0 }}
      animate={{ opacity: 1 }}
      exit={{ opacity: 0 }}
      transition={{ duration: 0.15 }}
    >
      <div className="flex items-center gap-1.5 ">
        <div>{statusIcon}</div>
      </div>
      {progress.message}
    </motion.div>
  );
};

View File

@ -1,15 +1,24 @@
import * as Popover from '@radix-ui/react-popover';
import type { PropsWithChildren, ReactNode } from 'react';
export default ({ children, trigger }: PropsWithChildren<{ trigger: ReactNode }>) => (
export default ({
children,
trigger,
side,
align,
}: PropsWithChildren<{
trigger: ReactNode;
side: 'top' | 'right' | 'bottom' | 'left' | undefined;
align: 'center' | 'start' | 'end' | undefined;
}>) => (
<Popover.Root>
<Popover.Trigger asChild>{trigger}</Popover.Trigger>
<Popover.Anchor />
<Popover.Portal>
<Popover.Content
sideOffset={10}
side="top"
align="center"
side={side}
align={align}
className="bg-bolt-elements-background-depth-2 text-bolt-elements-item-contentAccent p-2 rounded-md shadow-xl z-workbench"
>
{children}

View File

@ -16,7 +16,7 @@ export async function createSummary(props: {
contextOptimization?: boolean;
onFinish?: (resp: GenerateTextResult<Record<string, CoreTool<any, any>>, never>) => void;
}) {
const { messages, env: serverEnv, apiKeys, providerSettings, contextOptimization, onFinish } = props;
const { messages, env: serverEnv, apiKeys, providerSettings, onFinish } = props;
let currentModel = DEFAULT_MODEL;
let currentProvider = DEFAULT_PROVIDER.name;
const processedMessages = messages.map((message) => {
@ -29,9 +29,9 @@ export async function createSummary(props: {
} else if (message.role == 'assistant') {
let content = message.content;
if (contextOptimization) {
content = simplifyBoltActions(content);
}
content = simplifyBoltActions(content);
content = content.replace(/<div class=\\"__boltThought__\\">.*?<\/div>/s, '');
content = content.replace(/<think>.*?<\/think>/s, '');
return { ...message, content };
}
@ -92,6 +92,8 @@ ${summary.summary}`;
}
}
logger.debug('Sliced Messages:', slicedMessages.length);
const extractTextContent = (message: Message) =>
Array.isArray(message.content)
? (message.content.find((item) => item.type === 'text')?.text as string) || ''
@ -100,25 +102,82 @@ ${summary.summary}`;
// select files from the list of code file from the project that might be useful for the current request from the user
const resp = await generateText({
system: `
You are a software engineer. You are working on a project. tou need to summarize the work till now and provide a summary of the chat till now.
You are a software engineer. You are working on a project. you need to summarize the work till now and provide a summary of the chat till now.
${summaryText}
RULES:
* Only provide the summary of the chat till now.
* Do not provide any new information.
`,
prompt: `
please provide a summary of the chat till now.
below is the latest chat:
Please only use the following format to generate the summary:
---
# Project Overview
- **Project**: {project_name} - {brief_description}
- **Current Phase**: {phase}
- **Tech Stack**: {languages}, {frameworks}, {key_dependencies}
- **Environment**: {critical_env_details}
# Conversation Context
- **Last Topic**: {main_discussion_point}
- **Key Decisions**: {important_decisions_made}
- **User Context**:
- Technical Level: {expertise_level}
- Preferences: {coding_style_preferences}
- Communication: {preferred_explanation_style}
# Implementation Status
## Current State
- **Active Feature**: {feature_in_development}
- **Progress**: {what_works_and_what_doesn't}
- **Blockers**: {current_challenges}
## Code Evolution
- **Recent Changes**: {latest_modifications}
- **Working Patterns**: {successful_approaches}
- **Failed Approaches**: {attempted_solutions_that_failed}
# Requirements
- **Implemented**: {completed_features}
- **In Progress**: {current_focus}
- **Pending**: {upcoming_features}
- **Technical Constraints**: {critical_constraints}
# Critical Memory
- **Must Preserve**: {crucial_technical_context}
- **User Requirements**: {specific_user_needs}
- **Known Issues**: {documented_problems}
# Next Actions
- **Immediate**: {next_steps}
- **Open Questions**: {unresolved_issues}
---
Note:
4. Keep entries concise and focused on information needed for continuity
---
RULES:
* Only provide the whole summary of the chat till now.
* Do not provide any new information.
* Do not think too much; just start writing the summary immediately
* Do not write anything other than the summary in the provided structure
`,
prompt: `
Here is the previous summary of the chat:
<old_summary>
${summaryText}
</old_summary>
Below is the chat after that:
---
<new_chats>
${slicedMessages
.map((x) => {
return `---\n[${x.role}] ${extractTextContent(x)}\n---`;
})
.join('\n')}
</new_chats>
---
Please provide a summary of the chat till now, including the historical summary of the chat.
`,
model: provider.getModelInstance({
model: currentModel,

View File

@ -23,7 +23,7 @@ export async function selectContext(props: {
summary: string;
onFinish?: (resp: GenerateTextResult<Record<string, CoreTool<any, any>>, never>) => void;
}) {
const { messages, env: serverEnv, apiKeys, files, providerSettings, contextOptimization, summary, onFinish } = props;
const { messages, env: serverEnv, apiKeys, files, providerSettings, summary, onFinish } = props;
let currentModel = DEFAULT_MODEL;
let currentProvider = DEFAULT_PROVIDER.name;
const processedMessages = messages.map((message) => {
@ -36,9 +36,10 @@ export async function selectContext(props: {
} else if (message.role == 'assistant') {
let content = message.content;
if (contextOptimization) {
content = simplifyBoltActions(content);
}
content = simplifyBoltActions(content);
content = content.replace(/<div class=\\"__boltThought__\\">.*?<\/div>/s, '');
content = content.replace(/<think>.*?<\/think>/s, '');
return { ...message, content };
}

View File

@ -7,7 +7,7 @@ import { PromptLibrary } from '~/lib/common/prompt-library';
import { allowedHTMLElements } from '~/utils/markdown';
import { LLMManager } from '~/lib/modules/llm/manager';
import { createScopedLogger } from '~/utils/logger';
import { createFilesContext, extractPropertiesFromMessage, simplifyBoltActions } from './utils';
import { createFilesContext, extractPropertiesFromMessage } from './utils';
import { getFilePaths } from './select-context';
export type Messages = Message[];
@ -27,6 +27,7 @@ export async function streamText(props: {
contextOptimization?: boolean;
contextFiles?: FileMap;
summary?: string;
messageSliceId?: number;
}) {
const {
messages,
@ -51,10 +52,8 @@ export async function streamText(props: {
return { ...message, content };
} else if (message.role == 'assistant') {
let content = message.content;
if (contextOptimization) {
content = simplifyBoltActions(content);
}
content = content.replace(/<div class=\\"__boltThought__\\">.*?<\/div>/s, '');
content = content.replace(/<think>.*?<\/think>/s, '');
return { ...message, content };
}
@ -110,7 +109,7 @@ Below are all the files present in the project:
${filePaths.join('\n')}
---
Below is the context loaded into context buffer for you to have knowledge of and might need changes to fullfill current user request.
Below is the artifact containing the context loaded into the context buffer for you to have knowledge of, which might need changes to fulfill the current user request.
CONTEXT BUFFER:
---
${codeContext}
@ -126,10 +125,14 @@ ${props.summary}
---
`;
const lastMessage = processedMessages.pop();
if (props.messageSliceId) {
processedMessages = processedMessages.slice(props.messageSliceId);
} else {
const lastMessage = processedMessages.pop();
if (lastMessage) {
processedMessages = [lastMessage];
if (lastMessage) {
processedMessages = [lastMessage];
}
}
}
}

View File

@ -82,10 +82,10 @@ export function createFilesContext(files: FileMap, useRelativePath?: boolean) {
filePath = path.replace('/home/project/', '');
}
return `<file path="${filePath}">\n${codeWithLinesNumbers}\n</file>`;
return `<boltAction type="file" filePath="${filePath}">${codeWithLinesNumbers}</boltAction>`;
});
return `<codebase>${fileContexts.join('\n\n')}\n\n</codebase>`;
return `<boltArtifact id="code-content" title="Code Content" >\n${fileContexts.join('\n')}\n</boltArtifact>`;
}
export function extractCurrentContext(messages: Message[]) {

View File

@ -10,6 +10,7 @@ import { getFilePaths, selectContext } from '~/lib/.server/llm/select-context';
import type { ContextAnnotation, ProgressAnnotation } from '~/types/context';
import { WORK_DIR } from '~/utils/constants';
import { createSummary } from '~/lib/.server/llm/create-summary';
import { extractPropertiesFromMessage } from '~/lib/.server/llm/utils';
export async function action(args: ActionFunctionArgs) {
return chatAction(args);
@ -70,15 +71,21 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
const filePaths = getFilePaths(files || {});
let filteredFiles: FileMap | undefined = undefined;
let summary: string | undefined = undefined;
let messageSliceId = 0;
if (messages.length > 3) {
messageSliceId = messages.length - 3;
}
if (filePaths.length > 0 && contextOptimization) {
dataStream.writeData('HI ');
logger.debug('Generating Chat Summary');
dataStream.writeMessageAnnotation({
dataStream.writeData({
type: 'progress',
value: progressCounter++,
message: 'Generating Chat Summary',
} as ProgressAnnotation);
label: 'summary',
status: 'in-progress',
order: progressCounter++,
message: 'Analysing Request',
} satisfies ProgressAnnotation);
// Create a summary of the chat
console.log(`Messages count: ${messages.length}`);
@ -99,6 +106,13 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
}
},
});
dataStream.writeData({
type: 'progress',
label: 'summary',
status: 'complete',
order: progressCounter++,
message: 'Analysis Complete',
} satisfies ProgressAnnotation);
dataStream.writeMessageAnnotation({
type: 'chatSummary',
@ -108,11 +122,13 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
// Update context buffer
logger.debug('Updating Context Buffer');
dataStream.writeMessageAnnotation({
dataStream.writeData({
type: 'progress',
value: progressCounter++,
message: 'Updating Context Buffer',
} as ProgressAnnotation);
label: 'context',
status: 'in-progress',
order: progressCounter++,
message: 'Determining Files to Read',
} satisfies ProgressAnnotation);
// Select context files
console.log(`Messages count: ${messages.length}`);
@ -152,12 +168,15 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
}),
} as ContextAnnotation);
dataStream.writeMessageAnnotation({
dataStream.writeData({
type: 'progress',
value: progressCounter++,
message: 'Context Buffer Updated',
} as ProgressAnnotation);
logger.debug('Context Buffer Updated');
label: 'context',
status: 'complete',
order: progressCounter++,
message: 'Code Files Selected',
} satisfies ProgressAnnotation);
// logger.debug('Code Files Selected');
}
// Stream the text
@ -181,6 +200,13 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
totalTokens: cumulativeUsage.totalTokens,
},
});
dataStream.writeData({
type: 'progress',
label: 'response',
status: 'complete',
order: progressCounter++,
message: 'Response Generated',
} satisfies ProgressAnnotation);
await new Promise((resolve) => setTimeout(resolve, 0));
// stream.close();
@ -195,8 +221,14 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
logger.info(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
const lastUserMessage = messages.filter((x) => x.role == 'user').slice(-1)[0];
const { model, provider } = extractPropertiesFromMessage(lastUserMessage);
messages.push({ id: generateId(), role: 'assistant', content });
messages.push({ id: generateId(), role: 'user', content: CONTINUE_PROMPT });
messages.push({
id: generateId(),
role: 'user',
content: `[Model: ${model}]\n\n[Provider: ${provider}]\n\n${CONTINUE_PROMPT}`,
});
const result = await streamText({
messages,
@ -207,6 +239,9 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
providerSettings,
promptId,
contextOptimization,
contextFiles: filteredFiles,
summary,
messageSliceId,
});
result.mergeIntoDataStream(dataStream);
@ -226,6 +261,14 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
},
};
dataStream.writeData({
type: 'progress',
label: 'response',
status: 'in-progress',
order: progressCounter++,
message: 'Generating Response',
} satisfies ProgressAnnotation);
const result = await streamText({
messages,
env: context.cloudflare?.env,
@ -237,6 +280,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
contextOptimization,
contextFiles: filteredFiles,
summary,
messageSliceId,
});
(async () => {

View File

@ -7,6 +7,7 @@ import { MAX_TOKENS } from '~/lib/.server/llm/constants';
import { LLMManager } from '~/lib/modules/llm/manager';
import type { ModelInfo } from '~/lib/modules/llm/types';
import { getApiKeysFromCookie, getProviderSettingsFromCookie } from '~/lib/api/cookies';
import { createScopedLogger } from '~/utils/logger';
export async function action(args: ActionFunctionArgs) {
return llmCallAction(args);
@ -21,6 +22,8 @@ async function getModelList(options: {
return llmManager.updateModelList(options);
}
const logger = createScopedLogger('api.llmcall');
async function llmCallAction({ context, request }: ActionFunctionArgs) {
const { system, message, model, provider, streamOutput } = await request.json<{
system: string;
@ -106,6 +109,8 @@ async function llmCallAction({ context, request }: ActionFunctionArgs) {
throw new Error('Provider not found');
}
logger.info(`Generating response Provider: ${provider.name}, Model: ${modelDetails.name}`);
const result = await generateText({
system,
messages: [
@ -123,6 +128,7 @@ async function llmCallAction({ context, request }: ActionFunctionArgs) {
maxTokens: dynamicMaxTokens,
toolChoice: 'none',
});
logger.info(`Generated response`);
return new Response(JSON.stringify(result), {
status: 200,

View File

@ -11,6 +11,8 @@ export type ContextAnnotation =
// Streaming progress event emitted by the chat API route while it prepares a
// response (summary generation, context selection, response generation).
export type ProgressAnnotation = {
  type: 'progress';
  value: number; // numeric counter; sorted on (descending) by AssistantMessage
  label: string; // stable step identifier, used to de-duplicate updates per step
  status: 'in-progress' | 'complete';
  order: number; // monotonically increasing counter used for display ordering
  message: string; // human-readable status text shown in the UI
};

View File

@ -59,6 +59,7 @@ Instructions:
5. If no perfect match exists, recommend the closest option
Important: Provide only the selection tags in your response, no additional text.
MOST IMPORTANT: YOU DONT HAVE TIME TO THINK JUST START RESPONDING BASED ON HUNCH
`;
const templates: Template[] = STARTER_TEMPLATES.filter((t) => !t.name.includes('shadcn'));