diff --git a/app/lib/.server/llm/model.ts b/app/lib/.server/llm/model.ts index e07f2bb..da296d0 100644 --- a/app/lib/.server/llm/model.ts +++ b/app/lib/.server/llm/model.ts @@ -15,14 +15,23 @@ export function getAnthropicModel(apiKey: string, model: string) { return anthropic(model); } -export function getOpenAILikeModel(baseURL:string,apiKey: string, model: string) { + +export function getOpenAILikeModel(baseURL: string, apiKey: string, model: string) { + // console.log('OpenAILike config:', { baseURL, hasApiKey: !!apiKey, model }); const openai = createOpenAI({ baseURL, apiKey, }); - - return openai(model); + // console.log('OpenAI client created:', !!openai); + const client = openai(model); + // console.log('OpenAI model client:', !!client); + return client; + // return { + // model: client, + // provider: 'OpenAILike' // Correctly identifying the actual provider + // }; } + export function getOpenAIModel(apiKey: string, model: string) { const openai = createOpenAI({ apiKey, @@ -74,7 +83,7 @@ export function getOllamaModel(baseURL: string, model: string) { return Ollama; } -export function getDeepseekModel(apiKey: string, model: string){ +export function getDeepseekModel(apiKey: string, model: string) { const openai = createOpenAI({ baseURL: 'https://api.deepseek.com/beta', apiKey, @@ -108,9 +117,15 @@ export function getXAIModel(apiKey: string, model: string) { return openai(model); } + export function getModel(provider: string, model: string, env: Env, apiKeys?: Record) { - const apiKey = getAPIKey(env, provider, apiKeys); - const baseURL = getBaseURL(env, provider); + let apiKey; // Declare first + let baseURL; + + apiKey = getAPIKey(env, provider, apiKeys); // Then assign + baseURL = getBaseURL(env, provider); + + // console.log('getModel inputs:', { provider, model, baseURL, hasApiKey: !!apiKey }); switch (provider) { case 'Anthropic': @@ -126,11 +141,11 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Re case 'Google': 
return getGoogleModel(apiKey, model); case 'OpenAILike': - return getOpenAILikeModel(baseURL,apiKey, model); + return getOpenAILikeModel(baseURL, apiKey, model); case 'Deepseek': return getDeepseekModel(apiKey, model); case 'Mistral': - return getMistralModel(apiKey, model); + return getMistralModel(apiKey, model); case 'LMStudio': return getLMStudioModel(baseURL, model); case 'xAI': @@ -138,4 +153,4 @@ export function getModel(provider: string, model: string, env: Env, apiKeys?: Re default: return getOllamaModel(baseURL, model); } -} +} \ No newline at end of file diff --git a/app/lib/.server/llm/stream-text.ts b/app/lib/.server/llm/stream-text.ts index 3b563ea..28c8c96 100644 --- a/app/lib/.server/llm/stream-text.ts +++ b/app/lib/.server/llm/stream-text.ts @@ -52,6 +52,10 @@ function extractPropertiesFromMessage(message: Message): { model: string; provid }) : textContent.replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, ''); + // console.log('Model from message:', model); + // console.log('Found in MODEL_LIST:', MODEL_LIST.find((m) => m.name === model)); + // console.log('Current MODEL_LIST:', MODEL_LIST); + return { model, provider, content: cleanedContent }; } @@ -64,7 +68,7 @@ export function streamText( let currentModel = DEFAULT_MODEL; let currentProvider = DEFAULT_PROVIDER; - console.log('StreamText:', JSON.stringify(messages)); + // console.log('StreamText:', JSON.stringify(messages)); const processedMessages = messages.map((message) => { if (message.role === 'user') { @@ -82,11 +86,22 @@ export function streamText( return message; // No changes for non-user messages }); - return _streamText({ - model: getModel(currentProvider, currentModel, env, apiKeys), + // console.log('Message content:', messages[0].content); + // console.log('Extracted properties:', extractPropertiesFromMessage(messages[0])); + + const llmClient = getModel(currentProvider, currentModel, env, apiKeys); + // console.log('LLM Client:', llmClient); + + const llmConfig = { + ...options, + 
model: llmClient, //getModel(currentProvider, currentModel, env, apiKeys), + provider: currentProvider, system: getSystemPrompt(), maxTokens: MAX_TOKENS, messages: convertToCoreMessages(processedMessages), - ...options, - }); + }; + + // console.log('LLM Config:', llmConfig); + + return _streamText(llmConfig); } diff --git a/app/routes/api.chat.ts b/app/routes/api.chat.ts index d622b46..bc42fb2 100644 --- a/app/routes/api.chat.ts +++ b/app/routes/api.chat.ts @@ -37,7 +37,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) { model: string }>(); - console.log('ChatAction:', JSON.stringify(messages)); + // console.log('ChatAction:', JSON.stringify(messages)); const cookieHeader = request.headers.get("Cookie"); diff --git a/app/utils/constants.ts b/app/utils/constants.ts index 501a87e..b1b421b 100644 --- a/app/utils/constants.ts +++ b/app/utils/constants.ts @@ -32,6 +32,7 @@ const PROVIDER_LIST: ProviderInfo[] = [ name: 'OpenAILike', staticModels: [ { name: 'o1-mini', label: 'o1-mini', provider: 'OpenAILike' }, + { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAILike' }, ], getDynamicModels: getOpenAILikeModels }, @@ -58,7 +59,9 @@ const PROVIDER_LIST: ProviderInfo[] = [ }, { name: 'Google', - staticModels: [ + staticModels: [ + { name: 'gemini-exp-1121', label: 'Gemini Experimental 1121', provider: 'Google' }, + { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro 002', provider: 'Google' }, { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google' }, { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google' } ],