refactor(llm): simplify streamText function and remove unused code

Remove unused imports, the files parameter, and redundant multimodal handling logic. Streamline the function by passing processed messages directly to _streamText. Also add handling that strips package-lock.json content from message history to reduce token usage.
KevIsDev 2025-04-30 00:50:00 +01:00
parent 3a894d0516
commit 51762835d5


@@ -8,7 +8,6 @@ import { allowedHTMLElements } from '~/utils/markdown';
 import { LLMManager } from '~/lib/modules/llm/manager';
 import { createScopedLogger } from '~/utils/logger';
 import { createFilesContext, extractPropertiesFromMessage } from './utils';
-import { getFilePaths } from './select-context';

 export type Messages = Message[];
@@ -43,7 +42,6 @@ export async function streamText(props: {
   env: serverEnv,
   options,
   apiKeys,
-  files,
   providerSettings,
   promptId,
   contextOptimization,
@@ -64,6 +62,15 @@ export async function streamText(props: {
       content = content.replace(/<div class=\\"__boltThought__\\">.*?<\/div>/s, '');
       content = content.replace(/<think>.*?<\/think>/s, '');
+
+      // Remove package-lock.json content specifically keeping token usage MUCH lower
+      content = content.replace(
+        /<boltAction type="file" filePath="package-lock\.json">[\s\S]*?<\/boltAction>/g,
+        '[package-lock.json content removed]',
+      );
+
+      // Trim whitespace potentially left after removals
+      content = content.trim();

       return { ...message, content };
     }
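
Aside: a minimal standalone sketch of the stripping step added in this hunk. The sample message content below is made up for illustration; only the regular expression and the trim mirror the diff.

// sketch.ts — hypothetical sample input; only the regex and trim mirror the commit
const sampleContent = [
  '<boltAction type="file" filePath="package.json">{ "name": "demo" }</boltAction>',
  '<boltAction type="file" filePath="package-lock.json">{ "lockfileVersion": 3 }</boltAction>',
].join('\n');

const stripped = sampleContent
  .replace(
    /<boltAction type="file" filePath="package-lock\.json">[\s\S]*?<\/boltAction>/g,
    '[package-lock.json content removed]',
  )
  .trim();

// The package.json action is untouched; the lockfile body collapses to a short placeholder.
console.log(stripped);
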
@@ -113,26 +120,12 @@ export async function streamText(props: {
     },
   }) ?? getSystemPrompt();

-  if (files && contextFiles && contextOptimization) {
-    // Filter out package-lock.json from the files used for context
-    const filteredFiles = Object.fromEntries(
-      Object.entries(files).filter(([filePath]) => !filePath.endsWith('package-lock.json')),
-    );
-    const filteredContextFiles = Object.fromEntries(
-      Object.entries(contextFiles).filter(([filePath]) => !filePath.endsWith('package-lock.json')),
-    );
-
-    // Use the filtered maps to generate context
-    const codeContext = createFilesContext(filteredContextFiles, true); // Uses filtered contextFiles
-    const filePaths = getFilePaths(filteredFiles); // Uses filtered files
+  if (contextFiles && contextOptimization) {
+    const codeContext = createFilesContext(contextFiles, true);

     systemPrompt = `${systemPrompt}
-Below are all the files present in the project (excluding package-lock.json):
----
-${filePaths.join('\n')}
----
-
-Below is the artifact containing the context loaded into context buffer for you to have knowledge of and might need changes to fullfill current user request (excluding package-lock.json).
+Below is the artifact containing the context loaded into context buffer for you to have knowledge of and might need changes to fullfill current user request.
 CONTEXT BUFFER:
 ---
 ${codeContext}
@@ -162,40 +155,7 @@ ${props.summary}
   logger.info(`Sending llm call to ${provider.name} with model ${modelDetails.name}`);

-  // Store original messages for reference
-  const originalMessages = [...messages];
-  const hasMultimodalContent = originalMessages.some((msg) => Array.isArray(msg.content));
-
-  try {
-    if (hasMultimodalContent) {
-      /*
-       * For multimodal content, we need to preserve the original array structure
-       * but make sure the roles are valid and content items are properly formatted
-       */
-      const multimodalMessages = originalMessages.map((msg) => ({
-        role: msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant' ? msg.role : 'user',
-        content: Array.isArray(msg.content)
-          ? msg.content.map((item) => {
-              // Ensure each content item has the correct format
-              if (typeof item === 'string') {
-                return { type: 'text', text: item };
-              }
-
-              if (item && typeof item === 'object') {
-                if (item.type === 'image' && item.image) {
-                  return { type: 'image', image: item.image };
-                }
-
-                if (item.type === 'text') {
-                  return { type: 'text', text: item.text || '' };
-                }
-              }
-
-              // Default fallback for unknown formats
-              return { type: 'text', text: String(item || '') };
-            })
-          : [{ type: 'text', text: typeof msg.content === 'string' ? msg.content : String(msg.content || '') }],
-      }));
-
+  console.log(systemPrompt, processedMessages);
+
   return await _streamText({
     model: provider.getModelInstance({
@@ -206,82 +166,7 @@ ${props.summary}
     }),
     system: systemPrompt,
     maxTokens: dynamicMaxTokens,
-    messages: multimodalMessages as any,
+    messages: convertToCoreMessages(processedMessages as any),
     ...options,
   });
-    } else {
-      // For non-multimodal content, we use the standard approach
-      const normalizedTextMessages = processedMessages.map((msg) => ({
-        role: msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant' ? msg.role : 'user',
-        content: typeof msg.content === 'string' ? msg.content : String(msg.content || ''),
-      }));
-
-      return await _streamText({
-        model: provider.getModelInstance({
-          model: modelDetails.name,
-          serverEnv,
-          apiKeys,
-          providerSettings,
-        }),
-        system: systemPrompt,
-        maxTokens: dynamicMaxTokens,
-        messages: convertToCoreMessages(normalizedTextMessages),
-        ...options,
-      });
-    }
-  } catch (error: any) {
-    // Special handling for format errors
-    if (error.message && error.message.includes('messages must be an array of CoreMessage or UIMessage')) {
-      logger.warn('Message format error detected, attempting recovery with explicit formatting...');
-
-      // Create properly formatted messages for all cases as a last resort
-      const fallbackMessages = processedMessages.map((msg) => {
-        // Determine text content with careful type handling
-        let textContent = '';
-
-        if (typeof msg.content === 'string') {
-          textContent = msg.content;
-        } else if (Array.isArray(msg.content)) {
-          // Handle array content safely
-          const contentArray = msg.content as any[];
-          textContent = contentArray
-            .map((contentItem) =>
-              typeof contentItem === 'string'
-                ? contentItem
-                : contentItem?.text || contentItem?.image || String(contentItem || ''),
-            )
-            .join(' ');
-        } else {
-          textContent = String(msg.content || '');
-        }
-
-        return {
-          role: msg.role === 'system' || msg.role === 'user' || msg.role === 'assistant' ? msg.role : 'user',
-          content: [
-            {
-              type: 'text',
-              text: textContent,
-            },
-          ],
-        };
-      });
-
-      // Try one more time with the fallback format
-      return await _streamText({
-        model: provider.getModelInstance({
-          model: modelDetails.name,
-          serverEnv,
-          apiKeys,
-          providerSettings,
-        }),
-        system: systemPrompt,
-        maxTokens: dynamicMaxTokens,
-        messages: fallbackMessages as any,
-        ...options,
-      });
-    }
-
-    // If it's not a format error, re-throw the original error
-    throw error;
-  }
 }
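
Aside: a rough sketch of the simplified call path after this commit, assuming _streamText and convertToCoreMessages come from the Vercel AI SDK (the ai package), which the import names suggest. The provider, model name, system prompt, and token limit below are placeholders, not the project's provider.getModelInstance(...), systemPrompt, or dynamicMaxTokens.

// sketch.ts — assumes the Vercel AI SDK; all concrete values are placeholders
import { convertToCoreMessages, streamText as _streamText, type Message } from 'ai';
import { openai } from '@ai-sdk/openai'; // example provider, not the project's LLMManager

const processedMessages: Message[] = [
  { id: '1', role: 'user', content: 'Add a dark-mode toggle to the settings page.' },
];

const result = await _streamText({
  model: openai('gpt-4o-mini'),                  // stand-in for provider.getModelInstance(...)
  system: 'You are a helpful coding assistant.', // stand-in for the generated systemPrompt
  maxTokens: 4096,                               // stand-in for dynamicMaxTokens
  messages: convertToCoreMessages(processedMessages),
});

// Consume the stream as it arrives.
for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}

Because convertToCoreMessages already normalizes UI messages (including attachments) into core messages, the hand-rolled multimodal/text normalization and the format-error fallback removed above are no longer needed in this path.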