From 70161119065c9141f92a2666ad4c60496ec5ef07 Mon Sep 17 00:00:00 2001 From: Anirban Kar Date: Wed, 29 Jan 2025 15:37:20 +0530 Subject: [PATCH] feat: enhanced Code Context and Project Summary Features (#1191) * fix: docker prod env variable fix * lint and typecheck * removed hardcoded tag * better summary generation * improved summary generation for context optimization * remove think tags from the generation --- app/components/chat/AssistantMessage.tsx | 82 +++++++++++++-- app/components/chat/BaseChat.tsx | 18 +++- app/components/chat/Chat.client.tsx | 68 +++++++----- app/components/chat/Markdown.tsx | 2 - app/components/chat/ProgressCompilation.tsx | 111 ++++++++++++++++++++ app/components/ui/Popover.tsx | 15 ++- app/lib/.server/llm/create-summary.ts | 87 ++++++++++++--- app/lib/.server/llm/select-context.ts | 9 +- app/lib/.server/llm/stream-text.ts | 21 ++-- app/lib/.server/llm/utils.ts | 4 +- app/routes/api.chat.ts | 74 ++++++++++--- app/routes/api.llmcall.ts | 6 ++ app/types/context.ts | 4 +- app/utils/selectStarterTemplate.ts | 1 + 14 files changed, 416 insertions(+), 86 deletions(-) create mode 100644 app/components/chat/ProgressCompilation.tsx diff --git a/app/components/chat/AssistantMessage.tsx b/app/components/chat/AssistantMessage.tsx index 60076bd8..1e3ed2d9 100644 --- a/app/components/chat/AssistantMessage.tsx +++ b/app/components/chat/AssistantMessage.tsx @@ -1,23 +1,55 @@ import { memo } from 'react'; import { Markdown } from './Markdown'; import type { JSONValue } from 'ai'; -import type { ProgressAnnotation } from '~/types/context'; import Popover from '~/components/ui/Popover'; +import { workbenchStore } from '~/lib/stores/workbench'; +import { WORK_DIR } from '~/utils/constants'; interface AssistantMessageProps { content: string; annotations?: JSONValue[]; } +function openArtifactInWorkbench(filePath: string) { + filePath = normalizedFilePath(filePath); + + if (workbenchStore.currentView.get() !== 'code') { + workbenchStore.currentView.set('code'); + } + + workbenchStore.setSelectedFile(`${WORK_DIR}/${filePath}`); +} + +function normalizedFilePath(path: string) { + let normalizedPath = path; + + if (normalizedPath.startsWith(WORK_DIR)) { + normalizedPath = path.replace(WORK_DIR, ''); + } + + if (normalizedPath.startsWith('/')) { + normalizedPath = normalizedPath.slice(1); + } + + return normalizedPath; +} + export const AssistantMessage = memo(({ content, annotations }: AssistantMessageProps) => { const filteredAnnotations = (annotations?.filter( (annotation: JSONValue) => annotation && typeof annotation === 'object' && Object.keys(annotation).includes('type'), ) || []) as { type: string; value: any } & { [key: string]: any }[]; - let progressAnnotation: ProgressAnnotation[] = filteredAnnotations.filter( - (annotation) => annotation.type === 'progress', - ) as ProgressAnnotation[]; - progressAnnotation = progressAnnotation.sort((a, b) => b.value - a.value); + let chatSummary: string | undefined = undefined; + + if (filteredAnnotations.find((annotation) => annotation.type === 'chatSummary')) { + chatSummary = filteredAnnotations.find((annotation) => annotation.type === 'chatSummary')?.summary; + } + + let codeContext: string[] | undefined = undefined; + + if (filteredAnnotations.find((annotation) => annotation.type === 'codeContext')) { + codeContext = filteredAnnotations.find((annotation) => annotation.type === 'codeContext')?.files; + } const usage: { completionTokens: number; @@ -29,8 +61,44 @@ export const AssistantMessage = memo(({ content, annotations }: 
AssistantMessageProps) => {
   return (
     <div className="overflow-hidden w-full">
       <>
-        {progressAnnotation.length > 0 && (
-          <Popover trigger={<div className="i-ph:info" />}>{progressAnnotation[0].message}</Popover>
+        {(codeContext || chatSummary) && (
+          <Popover side="right" align="start" trigger={<div className="i-ph:info" />}>
+            {chatSummary && (
+              <div className="max-w-chat">
+                <div className="summary max-h-96 flex flex-col">
+                  <h2>Summary</h2>
+                  <div className="overflow-y-auto">{chatSummary}</div>
+                </div>
+                {codeContext && (
+                  <div className="code-context flex flex-col">
+                    <h2>Context</h2>
+                    <div className="flex gap-4 mt-4">
+                      {codeContext.map((x) => {
+                        const normalized = normalizedFilePath(x);
+                        return (
+                          <>
+                            <code
+                              className="hover:underline cursor-pointer"
+                              onClick={(e) => {
+                                e.preventDefault();
+                                e.stopPropagation();
+                                openArtifactInWorkbench(normalized);
+                              }}
+                            >
+                              {normalized}
+                            </code>
+                          </>
+                        );
+                      })}
+                    </div>
+                  </div>
+                )}
+              </div>
+            )}
+          </Popover>
+        )}
         {usage && (
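The popover above consumes two message annotations that the server (api.chat.ts, later in this patch) attaches to the assistant message: `chatSummary` carries a markdown summary in its `summary` field, and `codeContext` carries absolute file paths under `WORK_DIR` in its `files` field. Below is a minimal TypeScript sketch of that contract; the two type aliases are illustrative only, not types exported by the app:

```ts
import type { JSONValue } from 'ai';

// Illustrative aliases for the annotation shapes implied by the lookups above.
type ChatSummaryAnnotation = { type: 'chatSummary'; summary: string };
type CodeContextAnnotation = { type: 'codeContext'; files: string[] };

const annotations: JSONValue[] = [
  { type: 'chatSummary', summary: '# Project Overview\n- **Project**: demo app' },
  { type: 'codeContext', files: ['/home/project/src/App.tsx'] },
];

// Mirrors the component: the first matching annotation of each type wins.
const chatSummary = (
  annotations.find((a) => (a as { type?: string })?.type === 'chatSummary') as ChatSummaryAnnotation | undefined
)?.summary;
const codeContext = (
  annotations.find((a) => (a as { type?: string })?.type === 'codeContext') as CodeContextAnnotation | undefined
)?.files;

// normalizedFilePath strips the WORK_DIR prefix ('/home/project') and the leading
// slash, so '/home/project/src/App.tsx' renders and opens as 'src/App.tsx'.
console.log(chatSummary, codeContext);
```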
diff --git a/app/components/chat/BaseChat.tsx b/app/components/chat/BaseChat.tsx index 4bfc038c..024f8b05 100644 --- a/app/components/chat/BaseChat.tsx +++ b/app/components/chat/BaseChat.tsx @@ -2,7 +2,7 @@ * @ts-nocheck * Preventing TS checks with files presented in the video for a better presentation. */ -import type { Message } from 'ai'; +import type { JSONValue, Message } from 'ai'; import React, { type RefCallback, useEffect, useState } from 'react'; import { ClientOnly } from 'remix-utils/client-only'; import { Menu } from '~/components/sidebar/Menu.client'; @@ -32,6 +32,8 @@ import StarterTemplates from './StarterTemplates'; import type { ActionAlert } from '~/types/actions'; import ChatAlert from './ChatAlert'; import type { ModelInfo } from '~/lib/modules/llm/types'; +import ProgressCompilation from './ProgressCompilation'; +import type { ProgressAnnotation } from '~/types/context'; const TEXTAREA_MIN_HEIGHT = 76; @@ -64,6 +66,7 @@ interface BaseChatProps { setImageDataList?: (dataList: string[]) => void; actionAlert?: ActionAlert; clearAlert?: () => void; + data?: JSONValue[] | undefined; } export const BaseChat = React.forwardRef( @@ -97,6 +100,7 @@ export const BaseChat = React.forwardRef( messages, actionAlert, clearAlert, + data, }, ref, ) => { @@ -108,7 +112,15 @@ export const BaseChat = React.forwardRef( const [recognition, setRecognition] = useState(null); const [transcript, setTranscript] = useState(''); const [isModelLoading, setIsModelLoading] = useState('all'); - + const [progressAnnotations, setProgressAnnotations] = useState([]); + useEffect(() => { + if (data) { + const progressList = data.filter( + (x) => typeof x === 'object' && (x as any).type === 'progress', + ) as ProgressAnnotation[]; + setProgressAnnotations(progressList); + } + }, [data]); useEffect(() => { console.log(transcript); }, [transcript]); @@ -307,6 +319,7 @@ export const BaseChat = React.forwardRef( className={classNames('pt-6 px-2 sm:px-6', { 'h-full flex flex-col': chatStarted, })} + ref={scrollRef} > {() => { @@ -337,6 +350,7 @@ export const BaseChat = React.forwardRef( /> )}
+          {progressAnnotations && <ProgressCompilation data={progressAnnotations} />}
>({}); - const { messages, isLoading, input, handleInputChange, setInput, stop, append, setMessages, reload, error } = - useChat({ - api: '/api/chat', - body: { - apiKeys, - files, - promptId, - contextOptimization: contextOptimizationEnabled, - }, - sendExtraMessageFields: true, - onError: (e) => { - logger.error('Request failed\n\n', e, error); - toast.error( - 'There was an error processing your request: ' + (e.message ? e.message : 'No details were returned'), - ); - }, - onFinish: (message, response) => { - const usage = response.usage; + const { + messages, + isLoading, + input, + handleInputChange, + setInput, + stop, + append, + setMessages, + reload, + error, + data: chatData, + setData, + } = useChat({ + api: '/api/chat', + body: { + apiKeys, + files, + promptId, + contextOptimization: contextOptimizationEnabled, + }, + sendExtraMessageFields: true, + onError: (e) => { + logger.error('Request failed\n\n', e, error); + toast.error( + 'There was an error processing your request: ' + (e.message ? e.message : 'No details were returned'), + ); + }, + onFinish: (message, response) => { + const usage = response.usage; + setData(undefined); - if (usage) { - console.log('Token usage:', usage); + if (usage) { + console.log('Token usage:', usage); - // You can now use the usage data as needed - } + // You can now use the usage data as needed + } - logger.debug('Finished streaming'); - }, - initialMessages, - initialInput: Cookies.get(PROMPT_COOKIE_KEY) || '', - }); + logger.debug('Finished streaming'); + }, + initialMessages, + initialInput: Cookies.get(PROMPT_COOKIE_KEY) || '', + }); useEffect(() => { const prompt = searchParams.get('prompt'); @@ -535,6 +548,7 @@ export const ChatImpl = memo( setImageDataList={setImageDataList} actionAlert={actionAlert} clearAlert={() => workbenchStore.clearAlert()} + data={chatData} /> ); }, diff --git a/app/components/chat/Markdown.tsx b/app/components/chat/Markdown.tsx index 46cffd4e..90ba6b7f 100644 --- a/app/components/chat/Markdown.tsx +++ b/app/components/chat/Markdown.tsx @@ -23,8 +23,6 @@ export const Markdown = memo(({ children, html = false, limitedMarkdown = false const components = useMemo(() => { return { div: ({ className, children, node, ...props }) => { - console.log(className, node); - if (className?.includes('__boltArtifact__')) { const messageId = node?.properties.dataMessageId as string; diff --git a/app/components/chat/ProgressCompilation.tsx b/app/components/chat/ProgressCompilation.tsx new file mode 100644 index 00000000..270fac03 --- /dev/null +++ b/app/components/chat/ProgressCompilation.tsx @@ -0,0 +1,111 @@ +import { AnimatePresence, motion } from 'framer-motion'; +import React, { useState } from 'react'; +import type { ProgressAnnotation } from '~/types/context'; +import { classNames } from '~/utils/classNames'; +import { cubicEasingFn } from '~/utils/easings'; + +export default function ProgressCompilation({ data }: { data?: ProgressAnnotation[] }) { + const [progressList, setProgressList] = React.useState([]); + const [expanded, setExpanded] = useState(false); + React.useEffect(() => { + if (!data || data.length == 0) { + setProgressList([]); + return; + } + + const progressMap = new Map(); + data.forEach((x) => { + const existingProgress = progressMap.get(x.label); + + if (existingProgress && existingProgress.status === 'complete') { + return; + } + + progressMap.set(x.label, x); + }); + + const newData = Array.from(progressMap.values()); + newData.sort((a, b) => a.order - b.order); + setProgressList(newData); + }, [data]); 
+
+  if (progressList.length === 0) {
+    return <></>;
+  }
+
+  return (
+    <AnimatePresence>
+      <div className={classNames('relative w-full max-w-chat mx-auto z-prompt p-1')}>
+        <div className={classNames('flex')}>
+          <div className="flex-1">
+            <AnimatePresence>
+              {expanded ? (
+                <motion.div
+                  className="actions"
+                  initial={{ height: 0 }}
+                  animate={{ height: 'auto' }}
+                  exit={{ height: '0px' }}
+                  transition={{ duration: 0.15 }}
+                >
+                  {progressList.map((x, i) => {
+                    return <ProgressItem key={i} progress={x} />;
+                  })}
+                </motion.div>
+              ) : (
+                <ProgressItem progress={progressList.slice(-1)[0]} />
+              )}
+            </AnimatePresence>
+          </div>
+          <motion.button
+            initial={{ width: 0 }}
+            animate={{ width: 'auto' }}
+            exit={{ width: 0 }}
+            transition={{ duration: 0.15, ease: cubicEasingFn }}
+            onClick={() => setExpanded((v) => !v)}
+          >
+            <div className={expanded ? 'i-ph:caret-up-bold' : 'i-ph:caret-down-bold'}></div>
+          </motion.button>
+        </div>
+      </div>
+    </AnimatePresence>
+  );
+}
+
+const ProgressItem = ({ progress }: { progress: ProgressAnnotation }) => {
+  return (
+    <motion.div
+      className={classNames('flex text-sm gap-3')}
+      initial={{ opacity: 0 }}
+      animate={{ opacity: 1 }}
+      exit={{ opacity: 0 }}
+      transition={{ duration: 0.15 }}
+    >
+      <div className="flex items-center gap-1.5">
+        <div>
+          {progress.status === 'in-progress' ? (
+            <div className="i-svg-spinners:90-ring-with-bg"></div>
+          ) : progress.status === 'complete' ? (
+            <div className="i-ph:check"></div>
+          ) : null}
+        </div>
+        {/* {x.label} */}
+      </div>
+      {progress.message}
+    </motion.div>
+  );
+};
diff --git a/app/components/ui/Popover.tsx b/app/components/ui/Popover.tsx
index d00bf976..7aab92e8 100644
--- a/app/components/ui/Popover.tsx
+++ b/app/components/ui/Popover.tsx
@@ -1,15 +1,24 @@
 import * as Popover from '@radix-ui/react-popover';
 import type { PropsWithChildren, ReactNode } from 'react';
 
-export default ({ children, trigger }: PropsWithChildren<{ trigger: ReactNode }>) => (
+export default ({
+  children,
+  trigger,
+  side,
+  align,
+}: PropsWithChildren<{
+  trigger: ReactNode;
+  side: 'top' | 'right' | 'bottom' | 'left' | undefined;
+  align: 'center' | 'start' | 'end' | undefined;
+}>) => (
   <Popover.Root>
     <Popover.Trigger asChild>{trigger}</Popover.Trigger>
     <Popover.Anchor />
     <Popover.Portal>
-      <Popover.Content sideOffset={10}>
+      <Popover.Content sideOffset={10} side={side} align={align}>
         {children}
         <Popover.Arrow />
       </Popover.Content>
     </Popover.Portal>
   </Popover.Root>
 );
diff --git a/app/lib/.server/llm/create-summary.ts b/app/lib/.server/llm/create-summary.ts
index 4d14a279..e21abd81 100644
--- a/app/lib/.server/llm/create-summary.ts
+++ b/app/lib/.server/llm/create-summary.ts
@@ -16,7 +16,7 @@ export async function createSummary(props: {
   contextOptimization?: boolean;
   onFinish?: (resp: GenerateTextResult<Record<string, CoreTool<any, any>>, never>) => void;
 }) {
-  const { messages, env: serverEnv, apiKeys, providerSettings, contextOptimization, onFinish } = props;
+  const { messages, env: serverEnv, apiKeys, providerSettings, onFinish } = props;
   let currentModel = DEFAULT_MODEL;
   let currentProvider = DEFAULT_PROVIDER.name;
   const processedMessages = messages.map((message) => {
@@ -29,9 +29,9 @@ export async function createSummary(props: {
     } else if (message.role == 'assistant') {
       let content = message.content;
 
-      if (contextOptimization) {
-        content = simplifyBoltActions(content);
-      }
+      content = simplifyBoltActions(content);
+      content = content.replace(/<div class="__boltThought__">
.*?<\/div>/s, '');
+      content = content.replace(/<think>.*?<\/think>/s, '');
 
       return { ...message, content };
     }
@@ -92,6 +92,8 @@ ${summary.summary}`;
     }
   }
 
+  logger.debug('Sliced Messages:', slicedMessages.length);
+
   const extractTextContent = (message: Message) =>
     Array.isArray(message.content)
       ? (message.content.find((item) => item.type === 'text')?.text as string) || ''
@@ -100,25 +102,82 @@
   // select files from the list of code file from the project that might be useful for the current request from the user
   const resp = await generateText({
     system: `
-        You are a software engineer. You are working on a project. tou need to summarize the work till now and provide a summary of the chat till now.
+        You are a software engineer working on a project. You need to summarize the work done so far and provide a summary of the chat so far.
 
-        ${summaryText}
-
-        RULES:
-        * Only provide the summary of the chat till now.
-        * Do not provide any new information.
-        `,
-    prompt: `
-please provide a summary of the chat till now.
-below is the latest chat: 
+        Please only use the following format to generate the summary:
+---
+# Project Overview
+- **Project**: {project_name} - {brief_description}
+- **Current Phase**: {phase}
+- **Tech Stack**: {languages}, {frameworks}, {key_dependencies}
+- **Environment**: {critical_env_details}
+
+# Conversation Context
+- **Last Topic**: {main_discussion_point}
+- **Key Decisions**: {important_decisions_made}
+- **User Context**:
+  - Technical Level: {expertise_level}
+  - Preferences: {coding_style_preferences}
+  - Communication: {preferred_explanation_style}
+
+# Implementation Status
+## Current State
+- **Active Feature**: {feature_in_development}
+- **Progress**: {what_works_and_what_doesn't}
+- **Blockers**: {current_challenges}
+
+## Code Evolution
+- **Recent Changes**: {latest_modifications}
+- **Working Patterns**: {successful_approaches}
+- **Failed Approaches**: {attempted_solutions_that_failed}
+
+# Requirements
+- **Implemented**: {completed_features}
+- **In Progress**: {current_focus}
+- **Pending**: {upcoming_features}
+- **Technical Constraints**: {critical_constraints}
+
+# Critical Memory
+- **Must Preserve**: {crucial_technical_context}
+- **User Requirements**: {specific_user_needs}
+- **Known Issues**: {documented_problems}
+
+# Next Actions
+- **Immediate**: {next_steps}
+- **Open Questions**: {unresolved_issues}
+---
+Note:
+Keep entries concise and focused on the information needed for continuity.
+
+
+---
+
+        RULES:
+        * Only provide the whole summary of the chat till now.
+        * Do not provide any new information.
+        * Do not overthink; just start writing immediately.
+        * Do not write anything other than the summary, and keep to the provided structure.
+        `,
+    prompt: `
+
+Here is the previous summary of the chat:
+
+${summaryText}
+
+
+Below is the chat after that:
+---
+
 ${slicedMessages
   .map((x) => {
     return `---\n[${x.role}] ${extractTextContent(x)}\n---`;
   })
   .join('\n')}
+
 ---
+
+Please provide a summary of the chat till now, including the historical summary of the chat.
 `,
     model: provider.getModelInstance({
       model: currentModel,
diff --git a/app/lib/.server/llm/select-context.ts b/app/lib/.server/llm/select-context.ts
index 85780e42..93e8be08 100644
--- a/app/lib/.server/llm/select-context.ts
+++ b/app/lib/.server/llm/select-context.ts
@@ -23,7 +23,7 @@ export async function selectContext(props: {
   summary: string;
   onFinish?: (resp: GenerateTextResult<Record<string, CoreTool<any, any>>, never>) => void;
 }) {
-  const { messages, env: serverEnv, apiKeys, files, providerSettings, contextOptimization, summary, onFinish } = props;
+  const { messages, env: serverEnv, apiKeys, files, providerSettings, summary, onFinish } = props;
   let currentModel = DEFAULT_MODEL;
   let currentProvider = DEFAULT_PROVIDER.name;
   const processedMessages = messages.map((message) => {
@@ -36,9 +36,10 @@ export async function selectContext(props: {
     } else if (message.role == 'assistant') {
       let content = message.content;
 
-      if (contextOptimization) {
-        content = simplifyBoltActions(content);
-      }
+      content = simplifyBoltActions(content);
+
+      content = content.replace(/<div class="__boltThought__">
.*?<\/div>/s, '');
+      content = content.replace(/<think>.*?<\/think>/s, '');
 
       return { ...message, content };
     }
diff --git a/app/lib/.server/llm/stream-text.ts b/app/lib/.server/llm/stream-text.ts
index 374610c7..29579c9f 100644
--- a/app/lib/.server/llm/stream-text.ts
+++ b/app/lib/.server/llm/stream-text.ts
@@ -7,7 +7,7 @@ import { PromptLibrary } from '~/lib/common/prompt-library';
 import { allowedHTMLElements } from '~/utils/markdown';
 import { LLMManager } from '~/lib/modules/llm/manager';
 import { createScopedLogger } from '~/utils/logger';
-import { createFilesContext, extractPropertiesFromMessage, simplifyBoltActions } from './utils';
+import { createFilesContext, extractPropertiesFromMessage } from './utils';
 import { getFilePaths } from './select-context';
 
 export type Messages = Message[];
@@ -27,6 +27,7 @@ export async function streamText(props: {
   contextOptimization?: boolean;
   contextFiles?: FileMap;
   summary?: string;
+  messageSliceId?: number;
 }) {
   const {
     messages,
@@ -51,10 +52,8 @@
     return { ...message, content };
   } else if (message.role == 'assistant') {
     let content = message.content;
-
-    if (contextOptimization) {
-      content = simplifyBoltActions(content);
-    }
+    content = content.replace(/<div class="__boltThought__">
.*?<\/div>/s, '');
+    content = content.replace(/<think>.*?<\/think>/s, '');
 
     return { ...message, content };
   }
@@ -110,7 +109,7 @@
 Below are all the files present in the project:
 ---
 ${filePaths.join('\n')}
 ---
 
-Below is the context loaded into context buffer for you to have knowledge of and might need changes to fullfill current user request.
+Below is the artifact containing the context loaded into the context buffer for you to have knowledge of, which might need changes to fulfill the current user request.
 CONTEXT BUFFER:
 ---
 ${codeContext}
@@ -126,10 +125,14 @@
 ${props.summary}
 ---
 `;
 
-      const lastMessage = processedMessages.pop();
+      if (props.messageSliceId) {
+        processedMessages = processedMessages.slice(props.messageSliceId);
+      } else {
+        const lastMessage = processedMessages.pop();
 
-      if (lastMessage) {
-        processedMessages = [lastMessage];
+        if (lastMessage) {
+          processedMessages = [lastMessage];
+        }
       }
     }
   }
diff --git a/app/lib/.server/llm/utils.ts b/app/lib/.server/llm/utils.ts
index 9aac0891..e019a929 100644
--- a/app/lib/.server/llm/utils.ts
+++ b/app/lib/.server/llm/utils.ts
@@ -82,10 +82,10 @@ export function createFilesContext(files: FileMap, useRelativePath?: boolean) {
       filePath = path.replace('/home/project/', '');
     }
 
-    return `<file path="${filePath}">\n${codeWithLinesNumbers}\n</file>`;
+    return `<boltAction type="file" filePath="${filePath}">${codeWithLinesNumbers}</boltAction>`;
   });
 
-  return `${fileContexts.join('\n\n')}\n\n`;
+  return `<boltArtifact id="code-content" title="Code Content">\n${fileContexts.join('\n')}\n</boltArtifact>`;
 }
 
 export function extractCurrentContext(messages: Message[]) {
diff --git a/app/routes/api.chat.ts b/app/routes/api.chat.ts
index 8564bafd..bbecdae5 100644
--- a/app/routes/api.chat.ts
+++ b/app/routes/api.chat.ts
@@ -10,6 +10,7 @@ import { getFilePaths, selectContext } from '~/lib/.server/llm/select-context';
 import type { ContextAnnotation, ProgressAnnotation } from '~/types/context';
 import { WORK_DIR } from '~/utils/constants';
 import { createSummary } from '~/lib/.server/llm/create-summary';
+import { extractPropertiesFromMessage } from '~/lib/.server/llm/utils';
 
 export async function action(args: ActionFunctionArgs) {
   return chatAction(args);
@@ -70,15 +71,21 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
   const filePaths = getFilePaths(files || {});
   let filteredFiles: FileMap | undefined = undefined;
   let summary: string | undefined = undefined;
+  let messageSliceId = 0;
+
+  if (messages.length > 3) {
+    messageSliceId = messages.length - 3;
+  }
 
   if (filePaths.length > 0 && contextOptimization) {
-    dataStream.writeData('HI ');
     logger.debug('Generating Chat Summary');
-    dataStream.writeMessageAnnotation({
+    dataStream.writeData({
       type: 'progress',
-      value: progressCounter++,
-      message: 'Generating Chat Summary',
-    } as ProgressAnnotation);
+      label: 'summary',
+      status: 'in-progress',
+      order: progressCounter++,
+      message: 'Analysing Request',
+    } satisfies ProgressAnnotation);
 
     // Create a summary of the chat
     console.log(`Messages count: ${messages.length}`);
@@ -99,6 +106,13 @@
         }
       },
     });
+    dataStream.writeData({
+      type: 'progress',
+      label: 'summary',
+      status: 'complete',
+      order: progressCounter++,
+      message: 'Analysis Complete',
+    } satisfies ProgressAnnotation);
 
     dataStream.writeMessageAnnotation({
       type: 'chatSummary',
@@ -108,11 +122,13 @@
     // Update context buffer
     logger.debug('Updating Context Buffer');
-    dataStream.writeMessageAnnotation({
+    dataStream.writeData({
       type: 'progress',
-      value: progressCounter++,
-      message: 'Updating Context Buffer',
-    }
as ProgressAnnotation); + label: 'context', + status: 'in-progress', + order: progressCounter++, + message: 'Determining Files to Read', + } satisfies ProgressAnnotation); // Select context files console.log(`Messages count: ${messages.length}`); @@ -152,12 +168,15 @@ async function chatAction({ context, request }: ActionFunctionArgs) { }), } as ContextAnnotation); - dataStream.writeMessageAnnotation({ + dataStream.writeData({ type: 'progress', - value: progressCounter++, - message: 'Context Buffer Updated', - } as ProgressAnnotation); - logger.debug('Context Buffer Updated'); + label: 'context', + status: 'complete', + order: progressCounter++, + message: 'Code Files Selected', + } satisfies ProgressAnnotation); + + // logger.debug('Code Files Selected'); } // Stream the text @@ -181,6 +200,13 @@ async function chatAction({ context, request }: ActionFunctionArgs) { totalTokens: cumulativeUsage.totalTokens, }, }); + dataStream.writeData({ + type: 'progress', + label: 'response', + status: 'complete', + order: progressCounter++, + message: 'Response Generated', + } satisfies ProgressAnnotation); await new Promise((resolve) => setTimeout(resolve, 0)); // stream.close(); @@ -195,8 +221,14 @@ async function chatAction({ context, request }: ActionFunctionArgs) { logger.info(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`); + const lastUserMessage = messages.filter((x) => x.role == 'user').slice(-1)[0]; + const { model, provider } = extractPropertiesFromMessage(lastUserMessage); messages.push({ id: generateId(), role: 'assistant', content }); - messages.push({ id: generateId(), role: 'user', content: CONTINUE_PROMPT }); + messages.push({ + id: generateId(), + role: 'user', + content: `[Model: ${model}]\n\n[Provider: ${provider}]\n\n${CONTINUE_PROMPT}`, + }); const result = await streamText({ messages, @@ -207,6 +239,9 @@ async function chatAction({ context, request }: ActionFunctionArgs) { providerSettings, promptId, contextOptimization, + contextFiles: filteredFiles, + summary, + messageSliceId, }); result.mergeIntoDataStream(dataStream); @@ -226,6 +261,14 @@ async function chatAction({ context, request }: ActionFunctionArgs) { }, }; + dataStream.writeData({ + type: 'progress', + label: 'response', + status: 'in-progress', + order: progressCounter++, + message: 'Generating Response', + } satisfies ProgressAnnotation); + const result = await streamText({ messages, env: context.cloudflare?.env, @@ -237,6 +280,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) { contextOptimization, contextFiles: filteredFiles, summary, + messageSliceId, }); (async () => { diff --git a/app/routes/api.llmcall.ts b/app/routes/api.llmcall.ts index 5dd4c098..cf75e499 100644 --- a/app/routes/api.llmcall.ts +++ b/app/routes/api.llmcall.ts @@ -7,6 +7,7 @@ import { MAX_TOKENS } from '~/lib/.server/llm/constants'; import { LLMManager } from '~/lib/modules/llm/manager'; import type { ModelInfo } from '~/lib/modules/llm/types'; import { getApiKeysFromCookie, getProviderSettingsFromCookie } from '~/lib/api/cookies'; +import { createScopedLogger } from '~/utils/logger'; export async function action(args: ActionFunctionArgs) { return llmCallAction(args); @@ -21,6 +22,8 @@ async function getModelList(options: { return llmManager.updateModelList(options); } +const logger = createScopedLogger('api.llmcall'); + async function llmCallAction({ context, request }: ActionFunctionArgs) { const { system, message, model, provider, streamOutput } = await request.json<{ 
    system: string;
    message: string;
    model: string;
    provider: ProviderInfo;
    streamOutput?: boolean;
  }>();
@@ -106,6 +109,8 @@ async function llmCallAction({ context, request }: ActionFunctionArgs) {
     throw new Error('Provider not found');
   }
 
+  logger.info(`Generating response Provider: ${provider.name}, Model: ${modelDetails.name}`);
+
   const result = await generateText({
     system,
     messages: [
@@ -123,6 +128,7 @@
     maxTokens: dynamicMaxTokens,
     toolChoice: 'none',
   });
+  logger.info(`Generated response`);
 
   return new Response(JSON.stringify(result), {
     status: 200,
diff --git a/app/types/context.ts b/app/types/context.ts
index 4d8ea02b..75c21db7 100644
--- a/app/types/context.ts
+++ b/app/types/context.ts
@@ -11,6 +11,8 @@ export type ContextAnnotation =
 
 export type ProgressAnnotation = {
   type: 'progress';
-  value: number;
+  label: string;
+  status: 'in-progress' | 'complete';
+  order: number;
   message: string;
 };
diff --git a/app/utils/selectStarterTemplate.ts b/app/utils/selectStarterTemplate.ts
index 4a7536da..0bbb1892 100644
--- a/app/utils/selectStarterTemplate.ts
+++ b/app/utils/selectStarterTemplate.ts
@@ -59,6 +59,7 @@ Instructions:
 5. If no perfect match exists, recommend the closest option
 
 Important: Provide only the selection tags in your response, no additional text.
+MOST IMPORTANT: YOU DON'T HAVE TIME TO THINK, JUST START RESPONDING BASED ON A HUNCH
 `;
 
 const templates: Template[] = STARTER_TEMPLATES.filter((t) => !t.name.includes('shadcn'));
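Taken together, the ProgressAnnotation change above and the new ProgressCompilation component define a small client/server protocol: api.chat.ts writes `{ label, status, order, message }` events to the data stream, and the UI collapses them to the latest state per label before rendering. Below is a self-contained sketch of that collapse step; it mirrors the useEffect in ProgressCompilation.tsx, but `collapseProgress` itself is illustrative and not part of the codebase:

```ts
type ProgressAnnotation = {
  type: 'progress';
  label: string;
  status: 'in-progress' | 'complete';
  order: number;
  message: string;
};

// Collapse the raw stream to one entry per label, never downgrading a step
// that has already reported 'complete', then sort by emission order.
function collapseProgress(data: ProgressAnnotation[]): ProgressAnnotation[] {
  const byLabel = new Map<string, ProgressAnnotation>();

  for (const item of data) {
    const existing = byLabel.get(item.label);

    if (existing && existing.status === 'complete') {
      continue; // a finished step stays finished
    }

    byLabel.set(item.label, item);
  }

  return [...byLabel.values()].sort((a, b) => a.order - b.order);
}

// Example: the summary step finishes while context selection is still running.
const stream: ProgressAnnotation[] = [
  { type: 'progress', label: 'summary', status: 'in-progress', order: 0, message: 'Analysing Request' },
  { type: 'progress', label: 'summary', status: 'complete', order: 1, message: 'Analysis Complete' },
  { type: 'progress', label: 'context', status: 'in-progress', order: 2, message: 'Determining Files to Read' },
];

console.log(collapseProgress(stream).map((x) => `${x.label}: ${x.message}`));
// -> ['summary: Analysis Complete', 'context: Determining Files to Read']
```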