diff --git a/app/lib/.server/llm/stream-text.ts b/app/lib/.server/llm/stream-text.ts
index ad17c1fb..f09fd86e 100644
--- a/app/lib/.server/llm/stream-text.ts
+++ b/app/lib/.server/llm/stream-text.ts
@@ -8,7 +8,6 @@ import { allowedHTMLElements } from '~/utils/markdown';
import { LLMManager } from '~/lib/modules/llm/manager';
import { createScopedLogger } from '~/utils/logger';
import { createFilesContext, extractPropertiesFromMessage } from './utils';
-import { getFilePaths } from './select-context';
export type Messages = Message[];
@@ -43,7 +42,6 @@ export async function streamText(props: {
env: serverEnv,
options,
apiKeys,
- files,
providerSettings,
promptId,
contextOptimization,
@@ -64,6 +62,15 @@ export async function streamText(props: {
content = content.replace(/<div class="__boltThought__">.*?<\/div>/s, '');
content = content.replace(/<think>.*?<\/think>/s, '');
+ // Remove package-lock.json file contents from messages to keep token usage much lower
+ content = content.replace(
+ /<boltAction type="file" filePath="package-lock\.json">[\s\S]*?<\/boltAction>/g,
+ '[package-lock.json content removed]',
+ );
+
+ // Trim whitespace potentially left after removals
+ content = content.trim();
+
return { ...message, content };
}
@@ -113,26 +120,12 @@ export async function streamText(props: {
},
}) ?? getSystemPrompt();
- if (files && contextFiles && contextOptimization) {
- // Filter out package-lock.json from the files used for context
- const filteredFiles = Object.fromEntries(
- Object.entries(files).filter(([filePath]) => !filePath.endsWith('package-lock.json')),
- );
- const filteredContextFiles = Object.fromEntries(
- Object.entries(contextFiles).filter(([filePath]) => !filePath.endsWith('package-lock.json')),
- );
-
- // Use the filtered maps to generate context
- const codeContext = createFilesContext(filteredContextFiles, true); // Uses filtered contextFiles
- const filePaths = getFilePaths(filteredFiles); // Uses filtered files
+ if (contextFiles && contextOptimization) {
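+ // Build the context buffer that is injected into the system prompt below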
+ const codeContext = createFilesContext(contextFiles, true);
systemPrompt = `${systemPrompt}
-Below are all the files present in the project (excluding package-lock.json):
----
-${filePaths.join('\n')}
----
-Below is the artifact containing the context loaded into context buffer for you to have knowledge of and might need changes to fullfill current user request (excluding package-lock.json).
+Below is the artifact containing the context loaded into the context buffer, which you should be aware of and which might need changes to fulfill the current user request.
CONTEXT BUFFER:
---
${codeContext}
@@ -162,126 +155,18 @@ ${props.summary}
logger.info(`Sending llm call to ${provider.name} with model ${modelDetails.name}`);
- // Store original messages for reference
- const originalMessages = [...messages];
- const hasMultimodalContent = originalMessages.some((msg) => Array.isArray(msg.content));
+ logger.debug(systemPrompt, processedMessages);
- try {
- if (hasMultimodalContent) {
- /*
- * For multimodal content, we need to preserve the original array structure
- * but make sure the roles are valid and content items are properly formatted
- */
- const multimodalMessages = originalMessages.map((msg) => ({
- role: msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant' ? msg.role : 'user',
- content: Array.isArray(msg.content)
- ? msg.content.map((item) => {
- // Ensure each content item has the correct format
- if (typeof item === 'string') {
- return { type: 'text', text: item };
- }
-
- if (item && typeof item === 'object') {
- if (item.type === 'image' && item.image) {
- return { type: 'image', image: item.image };
- }
-
- if (item.type === 'text') {
- return { type: 'text', text: item.text || '' };
- }
- }
-
- // Default fallback for unknown formats
- return { type: 'text', text: String(item || '') };
- })
- : [{ type: 'text', text: typeof msg.content === 'string' ? msg.content : String(msg.content || '') }],
- }));
-
- return await _streamText({
- model: provider.getModelInstance({
- model: modelDetails.name,
- serverEnv,
- apiKeys,
- providerSettings,
- }),
- system: systemPrompt,
- maxTokens: dynamicMaxTokens,
- messages: multimodalMessages as any,
- ...options,
- });
- } else {
- // For non-multimodal content, we use the standard approach
- const normalizedTextMessages = processedMessages.map((msg) => ({
- role: msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant' ? msg.role : 'user',
- content: typeof msg.content === 'string' ? msg.content : String(msg.content || ''),
- }));
-
- return await _streamText({
- model: provider.getModelInstance({
- model: modelDetails.name,
- serverEnv,
- apiKeys,
- providerSettings,
- }),
- system: systemPrompt,
- maxTokens: dynamicMaxTokens,
- messages: convertToCoreMessages(normalizedTextMessages),
- ...options,
- });
- }
- } catch (error: any) {
- // Special handling for format errors
- if (error.message && error.message.includes('messages must be an array of CoreMessage or UIMessage')) {
- logger.warn('Message format error detected, attempting recovery with explicit formatting...');
-
- // Create properly formatted messages for all cases as a last resort
- const fallbackMessages = processedMessages.map((msg) => {
- // Determine text content with careful type handling
- let textContent = '';
-
- if (typeof msg.content === 'string') {
- textContent = msg.content;
- } else if (Array.isArray(msg.content)) {
- // Handle array content safely
- const contentArray = msg.content as any[];
- textContent = contentArray
- .map((contentItem) =>
- typeof contentItem === 'string'
- ? contentItem
- : contentItem?.text || contentItem?.image || String(contentItem || ''),
- )
- .join(' ');
- } else {
- textContent = String(msg.content || '');
- }
-
- return {
- role: msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant' ? msg.role : 'user',
- content: [
- {
- type: 'text',
- text: textContent,
- },
- ],
- };
- });
-
- // Try one more time with the fallback format
- return await _streamText({
- model: provider.getModelInstance({
- model: modelDetails.name,
- serverEnv,
- apiKeys,
- providerSettings,
- }),
- system: systemPrompt,
- maxTokens: dynamicMaxTokens,
- messages: fallbackMessages as any,
- ...options,
- });
- }
-
- // If it's not a format error, re-throw the original error
- throw error;
- }
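+ // Single streaming path: convertToCoreMessages normalizes the processed messages, replacing the removed multimodal and fallback branches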
+ return await _streamText({
+ model: provider.getModelInstance({
+ model: modelDetails.name,
+ serverEnv,
+ apiKeys,
+ providerSettings,
+ }),
+ system: systemPrompt,
+ maxTokens: dynamicMaxTokens,
+ messages: convertToCoreMessages(processedMessages as any),
+ ...options,
+ });
}