From 49c7129dedb4752e2e69c0ffba15eda170caeb13 Mon Sep 17 00:00:00 2001
From: Anirban Kar
Date: Mon, 6 Jan 2025 19:18:42 +0530
Subject: [PATCH] fix: ollama and lm studio url issue fix for docker and build
 (#1008)

* fix: ollama and lm studio url issue fix for docker and build

* vite config fix
---
 Dockerfile                                |  8 ++++--
 app/lib/modules/llm/providers/lmstudio.ts | 35 +++++++++++++++++++----
 app/lib/modules/llm/providers/ollama.ts   | 25 +++++++++++++---
 vite.config.ts                            |  9 ++++--
 4 files changed, 63 insertions(+), 14 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index cd2a6fbf..d287d407 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -45,13 +45,14 @@ ENV WRANGLER_SEND_METRICS=false \
     TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
     AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \
     VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
-    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\
+    RUNNING_IN_DOCKER=true

 # Pre-configure wrangler to disable metrics
 RUN mkdir -p /root/.config/.wrangler && \
     echo '{"enabled":false}' > /root/.config/.wrangler/metrics.json

-RUN npm run build
+RUN pnpm run build

 CMD [ "pnpm", "run", "dockerstart"]
@@ -84,7 +85,8 @@ ENV GROQ_API_KEY=${GROQ_API_KEY} \
     TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
     AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \
     VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
-    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\
+    RUNNING_IN_DOCKER=true

 RUN mkdir -p ${WORKDIR}/run
 CMD pnpm run dev --host
diff --git a/app/lib/modules/llm/providers/lmstudio.ts b/app/lib/modules/llm/providers/lmstudio.ts
index 4309df0d..ba319ac8 100644
--- a/app/lib/modules/llm/providers/lmstudio.ts
+++ b/app/lib/modules/llm/providers/lmstudio.ts
@@ -3,6 +3,7 @@ import type { ModelInfo } from '~/lib/modules/llm/types';
 import type { IProviderSetting } from '~/types/model';
 import { createOpenAI } from '@ai-sdk/openai';
 import type { LanguageModelV1 } from 'ai';
+import { logger } from '~/utils/logger';

 export default class LMStudioProvider extends BaseProvider {
   name = 'LMStudio';
@@ -22,7 +23,7 @@ export default class LMStudioProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
       providerSettings: settings,
       serverEnv,
@@ -31,7 +32,18 @@ export default class LMStudioProvider extends BaseProvider {
     });

     if (!baseUrl) {
-      return [];
+      throw new Error('No baseUrl found for LMStudio provider');
+    }
+
+    if (typeof window === 'undefined') {
+      /*
+       * Running in Server
+       * Backend: Check if we're running in Docker
+       */
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
     }

     const response = await fetch(`${baseUrl}/v1/models`);
@@ -51,13 +63,26 @@ export default class LMStudioProvider extends BaseProvider {
     providerSettings?: Record<string, IProviderSetting>;
   }) => LanguageModelV1 = (options) => {
     const { apiKeys, providerSettings, serverEnv, model } = options;
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
-      providerSettings,
+      providerSettings: providerSettings?.[this.name],
       serverEnv: serverEnv as any,
-      defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
+      defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
       defaultApiTokenKey: '',
     });
+
+    if (!baseUrl) {
+      throw new Error('No baseUrl found for LMStudio provider');
+    }
+
+    if (typeof window === 'undefined') {
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
+    }
+
+    logger.debug('LMStudio Base Url used: ', baseUrl);
+
     const lmstudio = createOpenAI({
       baseUrl: `${baseUrl}/v1`,
       apiKey: '',
diff --git a/app/lib/modules/llm/providers/ollama.ts b/app/lib/modules/llm/providers/ollama.ts
index 8f0ddf22..11cf6a2b 100644
--- a/app/lib/modules/llm/providers/ollama.ts
+++ b/app/lib/modules/llm/providers/ollama.ts
@@ -3,6 +3,7 @@ import type { ModelInfo } from '~/lib/modules/llm/types';
 import type { IProviderSetting } from '~/types/model';
 import type { LanguageModelV1 } from 'ai';
 import { ollama } from 'ollama-ai-provider';
+import { logger } from '~/utils/logger';

 interface OllamaModelDetails {
   parent_model: string;
@@ -45,7 +46,7 @@ export default class OllamaProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
       providerSettings: settings,
       serverEnv,
@@ -54,7 +55,18 @@ export default class OllamaProvider extends BaseProvider {
     });

     if (!baseUrl) {
-      return [];
+      throw new Error('No baseUrl found for OLLAMA provider');
+    }
+
+    if (typeof window === 'undefined') {
+      /*
+       * Running in Server
+       * Backend: Check if we're running in Docker
+       */
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
     }

     const response = await fetch(`${baseUrl}/api/tags`);
@@ -78,18 +90,23 @@ export default class OllamaProvider extends BaseProvider {
     const { apiKeys, providerSettings, serverEnv, model } = options;
     let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
-      providerSettings,
+      providerSettings: providerSettings?.[this.name],
       serverEnv: serverEnv as any,
       defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
       defaultApiTokenKey: '',
     });

     // Backend: Check if we're running in Docker
-    const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+    if (!baseUrl) {
+      throw new Error('No baseUrl found for OLLAMA provider');
+    }

+    const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
     baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
     baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;

+    logger.debug('Ollama Base Url used: ', baseUrl);
+
     const ollamaInstance = ollama(model, {
       numCtx: DEFAULT_NUM_CTX,
     }) as LanguageModelV1 & { config: any };
diff --git a/vite.config.ts b/vite.config.ts
index 312230a0..2510acbc 100644
--- a/vite.config.ts
+++ b/vite.config.ts
@@ -4,9 +4,11 @@ import { defineConfig, type ViteDevServer } from 'vite';
 import { nodePolyfills } from 'vite-plugin-node-polyfills';
 import { optimizeCssModules } from 'vite-plugin-optimize-css-modules';
 import tsconfigPaths from 'vite-tsconfig-paths';
-
+import * as dotenv from 'dotenv';
 import { execSync } from 'child_process';

+dotenv.config();
+
 // Get git hash with fallback
 const getGitHash = () => {
   try {
@@ -17,18 +19,21 @@ const getGitHash = () => {
 };

+
+
 export default defineConfig((config) => {
   return {
     define: {
       __COMMIT_HASH: JSON.stringify(getGitHash()),
       __APP_VERSION: JSON.stringify(process.env.npm_package_version),
+      // 'process.env': JSON.stringify(process.env)
     },
     build: {
       target: 'esnext',
     },
     plugins: [
       nodePolyfills({
-        include: ['path', 'buffer'],
+        include: ['path', 'buffer', 'process'],
       }),
       config.mode !== 'test' && remixCloudflareDevProxy(),
       remixVitePlugin({
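
Note (not part of the patch): the server-side host rewrite that both providers now perform can be summarized by the standalone TypeScript sketch below. The helper name rewriteDockerHost is illustrative only and does not exist in the codebase; the patch inlines this logic in each provider.

// Sketch only: inside a Docker container, "localhost"/"127.0.0.1" refer to the
// container itself, so server-side requests to an Ollama or LM Studio instance
// running on the host machine must target host.docker.internal instead.
function rewriteDockerHost(baseUrl: string): string {
  // Only rewrite on the server (no `window`) and only when the image sets
  // RUNNING_IN_DOCKER=true, as the Dockerfile in this patch does.
  const isServer = typeof window === 'undefined';
  const isDocker = isServer && process.env.RUNNING_IN_DOCKER === 'true';

  if (!isDocker) {
    return baseUrl;
  }

  return baseUrl
    .replace('localhost', 'host.docker.internal')
    .replace('127.0.0.1', 'host.docker.internal');
}

// Example: rewriteDockerHost('http://127.0.0.1:11434') -> 'http://host.docker.internal:11434'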