import { convertToCoreMessages, streamText as _streamText, type Message } from 'ai';
import { MAX_TOKENS, type FileMap } from './constants';
import { getSystemPrompt } from '~/lib/common/prompts/prompts';
import {
  DEFAULT_MODEL,
  DEFAULT_PROVIDER,
  MODIFICATIONS_TAG_NAME,
  PROVIDER_LIST,
  WORK_DIR,
} from '~/utils/constants';
import type { IProviderSetting } from '~/types/model';
import { PromptLibrary } from '~/lib/common/prompt-library';
import { allowedHTMLElements } from '~/utils/markdown';
import { LLMManager } from '~/lib/modules/llm/manager';
import { createScopedLogger } from '~/utils/logger';
import { createFilesContext, extractPropertiesFromMessage, simplifyBoltActions } from './utils';
import { getFilePaths } from './select-context';

export type Messages = Message[];

export type StreamingOptions = Omit<Parameters<typeof _streamText>[0], 'model'>;

const logger = createScopedLogger('stream-text');

export async function streamText(props: {
  messages: Omit<Message, 'id'>[];
  env?: Env;
  options?: StreamingOptions;
  apiKeys?: Record<string, string>;
  files?: FileMap;
  providerSettings?: Record<string, IProviderSetting>;
  promptId?: string;
  contextOptimization?: boolean;
  contextFiles?: FileMap;
  summary?: string;
}) {
  const {
    messages,
    env: serverEnv,
    options,
    apiKeys,
    files,
    providerSettings,
    promptId,
    contextOptimization,
    contextFiles,
    summary,
  } = props;

  let currentModel = DEFAULT_MODEL;
  let currentProvider = DEFAULT_PROVIDER.name;

  let processedMessages = messages.map((message) => {
    if (message.role === 'user') {
      // User messages carry the model/provider selection inline; strip it out
      // and remember the selection for the rest of the call.
      const { model, provider, content } = extractPropertiesFromMessage(message);
      currentModel = model;
      currentProvider = provider;

      return { ...message, content };
    } else if (message.role === 'assistant') {
      let content = message.content;

      if (contextOptimization) {
        content = simplifyBoltActions(content);
      }

      return { ...message, content };
    }

    return message;
  });

  const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
  const staticModels = LLMManager.getInstance().getStaticModelListFromProvider(provider);
  let modelDetails = staticModels.find((m) => m.name === currentModel);

  if (!modelDetails) {
    // The model isn't in the static list; also consult the provider's dynamic list.
    const modelsList = [
      ...(provider.staticModels || []),
      ...(await LLMManager.getInstance().getModelListFromProvider(provider, {
        apiKeys,
        providerSettings,
        serverEnv: serverEnv as any,
      })),
    ];

    if (!modelsList.length) {
      throw new Error(`No models found for provider ${provider.name}`);
    }

    modelDetails = modelsList.find((m) => m.name === currentModel);

    if (!modelDetails) {
      // Fall back to the first available model.
      logger.warn(
        `MODEL [${currentModel}] not found in provider [${provider.name}]. Falling back to first model: ${modelsList[0].name}`,
      );
      modelDetails = modelsList[0];
    }
  }

  const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;

  let systemPrompt =
    PromptLibrary.getPropmtFromLibrary(promptId || 'default', {
      cwd: WORK_DIR,
      allowedHtmlElements: allowedHTMLElements,
      modificationTagName: MODIFICATIONS_TAG_NAME,
    }) ?? getSystemPrompt();

  if (files && contextFiles && contextOptimization) {
    const codeContext = createFilesContext(contextFiles, true);
    const filePaths = getFilePaths(files);

    systemPrompt = `${systemPrompt}
Below are all the files present in the project:
---
${filePaths.join('\n')}
---

Below is the context loaded into the context buffer for you to have knowledge of; it may need changes to fulfill the current user request.
CONTEXT BUFFER:
---
${codeContext}
---
`;

    if (summary) {
      systemPrompt = `${systemPrompt}
Below is the chat history till now.
CHAT SUMMARY:
---
${props.summary}
---
`;

      // The summary already covers earlier turns, so only the latest message is sent.
      const lastMessage = processedMessages.pop();

      if (lastMessage) {
        processedMessages = [lastMessage];
      }
    }
  }

  logger.info(`Sending llm call to ${provider.name} with model ${modelDetails.name}`);

  return await _streamText({
    model: provider.getModelInstance({
      model: modelDetails.name,
      serverEnv,
      apiKeys,
      providerSettings,
    }),
    system: systemPrompt,
    maxTokens: dynamicMaxTokens,
    messages: convertToCoreMessages(processedMessages as any),
    ...options,
  });
}
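
/*
 * Example usage — a minimal sketch, not part of the module. The caller shape,
 * the `context.cloudflare.env` source for `Env`, and the API key map are
 * assumptions for illustration; the `[Model: ...]` / `[Provider: ...]` prefix
 * is the inline selection format that `extractPropertiesFromMessage` strips
 * from user messages.
 *
 *   const result = await streamText({
 *     messages: [
 *       {
 *         role: 'user',
 *         content: '[Model: gpt-4o-mini]\n\n[Provider: OpenAI]\n\nAdd a dark mode toggle',
 *       },
 *     ],
 *     env: context.cloudflare.env as Env, // hypothetical: whatever exposes your Env bindings
 *     apiKeys: { OpenAI: openAiKeyFromCookieOrEnv }, // hypothetical key source
 *     contextOptimization: true,
 *   });
 *
 *   for await (const chunk of result.textStream) {
 *     // forward each text chunk to the client
 *   }
 */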