fix: ollama and lm studio url issue fix for docker and build (#1008)

* fix: ollama and lm studio url issue fix for docker and build

* vite config fix
Author: Anirban Kar (committed by GitHub), 2025-01-06 19:18:42 +05:30
Parent: 3ecac25a35
Commit: 49c7129ded
4 changed files with 63 additions and 14 deletions

Dockerfile

@@ -45,13 +45,14 @@ ENV WRANGLER_SEND_METRICS=false \
     TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
     AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \
     VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
-    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\
+    RUNNING_IN_DOCKER=true

 # Pre-configure wrangler to disable metrics
 RUN mkdir -p /root/.config/.wrangler && \
     echo '{"enabled":false}' > /root/.config/.wrangler/metrics.json

-RUN npm run build
+RUN pnpm run build

 CMD [ "pnpm", "run", "dockerstart"]

@@ -84,7 +85,8 @@ ENV GROQ_API_KEY=${GROQ_API_KEY} \
     TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
     AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \
     VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
-    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\
+    RUNNING_IN_DOCKER=true

 RUN mkdir -p ${WORKDIR}/run
 CMD pnpm run dev --host
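
Context for the RUNNING_IN_DOCKER flag: inside a container, localhost and 127.0.0.1 point at the container itself, so a base URL like http://127.0.0.1:11434 never reaches an Ollama or LM Studio server running on the host. The provider changes below key off this flag to rewrite such URLs to host.docker.internal, which Docker resolves to the host machine. A minimal sketch of the rewrite (hypothetical helper name; the actual commit inlines this logic in each provider):

// Hypothetical helper mirroring the rewrite the providers below inline.
function rewriteForDocker(baseUrl: string): string {
  // RUNNING_IN_DOCKER is the env var the Dockerfile now sets to 'true'.
  if (process.env.RUNNING_IN_DOCKER !== 'true') {
    return baseUrl;
  }

  // host.docker.internal resolves to the Docker host from inside a container.
  return baseUrl.replace('localhost', 'host.docker.internal').replace('127.0.0.1', 'host.docker.internal');
}

// rewriteForDocker('http://127.0.0.1:11434') => 'http://host.docker.internal:11434'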

app/lib/modules/llm/providers/lmstudio.ts

@@ -3,6 +3,7 @@ import type { ModelInfo } from '~/lib/modules/llm/types';
 import type { IProviderSetting } from '~/types/model';
 import { createOpenAI } from '@ai-sdk/openai';
 import type { LanguageModelV1 } from 'ai';
+import { logger } from '~/utils/logger';

 export default class LMStudioProvider extends BaseProvider {
   name = 'LMStudio';

@@ -22,7 +23,7 @@ export default class LMStudioProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
       providerSettings: settings,
       serverEnv,

@@ -31,7 +32,18 @@ export default class LMStudioProvider extends BaseProvider {
     });

     if (!baseUrl) {
-      return [];
+      throw new Error('No baseUrl found for LMStudio provider');
+    }
+
+    if (typeof window === 'undefined') {
+      /*
+       * Running in Server
+       * Backend: Check if we're running in Docker
+       */
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
     }

     const response = await fetch(`${baseUrl}/v1/models`);

@@ -51,13 +63,26 @@ export default class LMStudioProvider extends BaseProvider {
     providerSettings?: Record<string, IProviderSetting>;
   }) => LanguageModelV1 = (options) => {
     const { apiKeys, providerSettings, serverEnv, model } = options;

-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
-      providerSettings,
+      providerSettings: providerSettings?.[this.name],
       serverEnv: serverEnv as any,
-      defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
+      defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
       defaultApiTokenKey: '',
     });

+    if (!baseUrl) {
+      throw new Error('No baseUrl found for LMStudio provider');
+    }
+
+    if (typeof window === 'undefined') {
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
+    }
+
+    logger.debug('LMStudio Base Url used: ', baseUrl);
+
     const lmstudio = createOpenAI({
       baseUrl: `${baseUrl}/v1`,
       apiKey: '',
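
Two behavioral notes on this file: getModelInstance previously looked up OLLAMA_API_BASE_URL (an apparent copy-paste slip) and now reads LMSTUDIO_API_BASE_URL, and a missing base URL now throws instead of silently returning an empty model list. A sketch of what callers must now handle (hypothetical call site; the getDynamicModels name and arguments are assumed from the signature above):

// Hypothetical call site: the model-list fetch can now reject instead of
// resolving to an empty array when no base URL is configured.
try {
  const models = await provider.getDynamicModels(apiKeys, settings, serverEnv);
  console.log(models.map((m) => m.name));
} catch (err) {
  console.error('LMStudio unavailable:', err); // 'No baseUrl found for LMStudio provider'
}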

app/lib/modules/llm/providers/ollama.ts

@@ -3,6 +3,7 @@ import type { ModelInfo } from '~/lib/modules/llm/types';
 import type { IProviderSetting } from '~/types/model';
 import type { LanguageModelV1 } from 'ai';
 import { ollama } from 'ollama-ai-provider';
+import { logger } from '~/utils/logger';

 interface OllamaModelDetails {
   parent_model: string;

@@ -45,7 +46,7 @@ export default class OllamaProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
       providerSettings: settings,
       serverEnv,

@@ -54,7 +55,18 @@ export default class OllamaProvider extends BaseProvider {
     });

     if (!baseUrl) {
-      return [];
+      throw new Error('No baseUrl found for OLLAMA provider');
+    }
+
+    if (typeof window === 'undefined') {
+      /*
+       * Running in Server
+       * Backend: Check if we're running in Docker
+       */
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
     }

     const response = await fetch(`${baseUrl}/api/tags`);

@@ -78,18 +90,23 @@ export default class OllamaProvider extends BaseProvider {
     const { apiKeys, providerSettings, serverEnv, model } = options;

     let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
-      providerSettings,
+      providerSettings: providerSettings?.[this.name],
       serverEnv: serverEnv as any,
       defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
       defaultApiTokenKey: '',
     });

     // Backend: Check if we're running in Docker
-    const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+    if (!baseUrl) {
+      throw new Error('No baseUrl found for OLLAMA provider');
+    }

+    const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
     baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
     baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;

+    logger.debug('Ollama Base Url used: ', baseUrl);
+
     const ollamaInstance = ollama(model, {
       numCtx: DEFAULT_NUM_CTX,
     }) as LanguageModelV1 & { config: any };
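
The Docker URL rewrite now appears four times across the two providers (twice per file). Not part of this commit, but a natural follow-up would be a shared helper; a sketch with hypothetical names:

// Hypothetical shared helper; both providers could call this instead of
// duplicating the rewrite in getDynamicModels and getModelInstance.
export function resolveDockerBaseUrl(baseUrl: string): string {
  // Browser bundles never see RUNNING_IN_DOCKER; only rewrite server-side.
  if (typeof window !== 'undefined' || process.env.RUNNING_IN_DOCKER !== 'true') {
    return baseUrl;
  }

  return baseUrl.replace('localhost', 'host.docker.internal').replace('127.0.0.1', 'host.docker.internal');
}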

vite.config.ts

@@ -4,9 +4,11 @@ import { defineConfig, type ViteDevServer } from 'vite';
 import { nodePolyfills } from 'vite-plugin-node-polyfills';
 import { optimizeCssModules } from 'vite-plugin-optimize-css-modules';
 import tsconfigPaths from 'vite-tsconfig-paths';
+import * as dotenv from 'dotenv';
 import { execSync } from 'child_process';

+dotenv.config();
+
 // Get git hash with fallback
 const getGitHash = () => {
   try {

@@ -17,18 +19,21 @@ const getGitHash = () => {
 };

 export default defineConfig((config) => {
   return {
     define: {
       __COMMIT_HASH: JSON.stringify(getGitHash()),
       __APP_VERSION: JSON.stringify(process.env.npm_package_version),
+      // 'process.env': JSON.stringify(process.env)
     },
     build: {
       target: 'esnext',
     },
     plugins: [
       nodePolyfills({
-        include: ['path', 'buffer'],
+        include: ['path', 'buffer', 'process'],
       }),
       config.mode !== 'test' && remixCloudflareDevProxy(),
       remixVitePlugin({
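
The vite.config.ts change does two things: dotenv.config() loads .env into process.env while the config and dev server run in Node, and adding 'process' to the polyfill list gives browser code a process shim so references to process.env do not crash in the built bundle. A minimal illustration of the dotenv side (assumes a .env file at the project root containing, e.g., OLLAMA_API_BASE_URL=http://127.0.0.1:11434):

// Node-side sketch: after dotenv.config(), .env entries are readable
// wherever config/build-time code consults process.env.
import * as dotenv from 'dotenv';

dotenv.config();
console.log(process.env.OLLAMA_API_BASE_URL); // 'http://127.0.0.1:11434'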