mirror of https://github.com/stackblitz-labs/bolt.diy
synced 2025-01-22 02:45:36 +00:00
fix: ollama and lm studio url issue fix for docker and build (#1008)
* fix: ollama and lm studio url issue fix for docker and build
* vite config fix
This commit is contained in:
parent 3ecac25a35
commit 49c7129ded
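
The core of the fix: when the app runs inside Docker, localhost and 127.0.0.1 resolve to the container itself, not to the host machine where Ollama or LM Studio are listening, so both providers now rewrite their base URL to host.docker.internal. A minimal sketch of the rewrite the diffs below apply (the helper name is illustrative; the commit inlines this logic in each provider):

// Sketch of the URL rewrite both providers now perform. rewriteHostForDocker
// is a hypothetical name; the commit repeats this logic inline per provider.
function rewriteHostForDocker(baseUrl: string): string {
  const isDocker = process.env.RUNNING_IN_DOCKER === 'true';

  if (!isDocker) {
    return baseUrl;
  }

  // Inside a container, localhost/127.0.0.1 point back at the container;
  // host.docker.internal resolves to the Docker host running the LLM server.
  return baseUrl
    .replace('localhost', 'host.docker.internal')
    .replace('127.0.0.1', 'host.docker.internal');
}

// rewriteHostForDocker('http://127.0.0.1:11434') -> 'http://host.docker.internal:11434'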
Dockerfile

@@ -45,13 +45,14 @@ ENV WRANGLER_SEND_METRICS=false \
     TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
     AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \
     VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
-    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\
+    RUNNING_IN_DOCKER=true

 # Pre-configure wrangler to disable metrics
 RUN mkdir -p /root/.config/.wrangler && \
     echo '{"enabled":false}' > /root/.config/.wrangler/metrics.json

-RUN npm run build
+RUN pnpm run build

 CMD [ "pnpm", "run", "dockerstart"]

@@ -84,7 +85,8 @@ ENV GROQ_API_KEY=${GROQ_API_KEY} \
     TOGETHER_API_BASE_URL=${TOGETHER_API_BASE_URL} \
     AWS_BEDROCK_CONFIG=${AWS_BEDROCK_CONFIG} \
     VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
-    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\
+    RUNNING_IN_DOCKER=true

 RUN mkdir -p ${WORKDIR}/run
 CMD pnpm run dev --host
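
Note the trailing backslash added to DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}\: it continues the ENV instruction so RUNNING_IN_DOCKER=true lands in the same layer, in both the production and the dev image. Server-side code can then detect containerized execution. A short sketch of the guard, assuming Node-style process.env access and the window check used in the provider diffs below:

// Sketch: read the flag the ENV lines above bake into the image.
// The typeof window check keeps this server-only, matching the providers below.
const isDocker = typeof window === 'undefined' && process.env.RUNNING_IN_DOCKER === 'true';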
app/lib/modules/llm/providers/lmstudio.ts

@@ -3,6 +3,7 @@ import type { ModelInfo } from '~/lib/modules/llm/types';
 import type { IProviderSetting } from '~/types/model';
 import { createOpenAI } from '@ai-sdk/openai';
 import type { LanguageModelV1 } from 'ai';
+import { logger } from '~/utils/logger';

 export default class LMStudioProvider extends BaseProvider {
   name = 'LMStudio';
@@ -22,7 +23,7 @@ export default class LMStudioProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
       providerSettings: settings,
       serverEnv,
@@ -31,7 +32,18 @@ export default class LMStudioProvider extends BaseProvider {
     });

     if (!baseUrl) {
-      return [];
+      throw new Error('No baseUrl found for LMStudio provider');
     }

+    if (typeof window === 'undefined') {
+      /*
+       * Running in Server
+       * Backend: Check if we're running in Docker
+       */
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
+    }
+
     const response = await fetch(`${baseUrl}/v1/models`);
@@ -51,13 +63,26 @@ export default class LMStudioProvider extends BaseProvider {
     providerSettings?: Record<string, IProviderSetting>;
   }) => LanguageModelV1 = (options) => {
     const { apiKeys, providerSettings, serverEnv, model } = options;
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
-      providerSettings,
+      providerSettings: providerSettings?.[this.name],
       serverEnv: serverEnv as any,
-      defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
+      defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
       defaultApiTokenKey: '',
     });
+
+    if (!baseUrl) {
+      throw new Error('No baseUrl found for LMStudio provider');
+    }
+
+    if (typeof window === 'undefined') {
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
+    }
+
+    logger.debug('LMStudio Base Url used: ', baseUrl);
+
     const lmstudio = createOpenAI({
       baseUrl: `${baseUrl}/v1`,
       apiKey: '',
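
Beyond the copy-paste fixes (this provider previously read OLLAMA_API_BASE_URL instead of LMSTUDIO_API_BASE_URL), one behavioral change is worth flagging: getDynamicModels used to return [] when no base URL was configured, and it now throws. A hedged caller-side sketch (the call site and the ModelInfo name field are assumed from the surrounding diff, not shown in this commit):

// Sketch: a missing LMStudio base URL now surfaces as an explicit error
// instead of silently producing an empty model list.
try {
  const models = await provider.getDynamicModels(apiKeys, settings, serverEnv);
  console.log(models.map((m) => m.name).join(', '));
} catch (error) {
  console.error('LMStudio base URL missing or unreachable:', error);
}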
app/lib/modules/llm/providers/ollama.ts

@@ -3,6 +3,7 @@ import type { ModelInfo } from '~/lib/modules/llm/types';
 import type { IProviderSetting } from '~/types/model';
 import type { LanguageModelV1 } from 'ai';
 import { ollama } from 'ollama-ai-provider';
+import { logger } from '~/utils/logger';

 interface OllamaModelDetails {
   parent_model: string;
@@ -45,7 +46,7 @@ export default class OllamaProvider extends BaseProvider {
     settings?: IProviderSetting,
     serverEnv: Record<string, string> = {},
   ): Promise<ModelInfo[]> {
-    const { baseUrl } = this.getProviderBaseUrlAndKey({
+    let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
       providerSettings: settings,
       serverEnv,
@@ -54,7 +55,18 @@ export default class OllamaProvider extends BaseProvider {
     });

     if (!baseUrl) {
-      return [];
+      throw new Error('No baseUrl found for OLLAMA provider');
     }

+    if (typeof window === 'undefined') {
+      /*
+       * Running in Server
+       * Backend: Check if we're running in Docker
+       */
+      const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+
+      baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+      baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;
+    }
+
     const response = await fetch(`${baseUrl}/api/tags`);
@@ -78,18 +90,23 @@ export default class OllamaProvider extends BaseProvider {
     const { apiKeys, providerSettings, serverEnv, model } = options;

     let { baseUrl } = this.getProviderBaseUrlAndKey({
       apiKeys,
-      providerSettings,
+      providerSettings: providerSettings?.[this.name],
       serverEnv: serverEnv as any,
       defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
       defaultApiTokenKey: '',
     });

-    // Backend: Check if we're running in Docker
-    const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+    if (!baseUrl) {
+      throw new Error('No baseUrl found for OLLAMA provider');
+    }

+    const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
     baseUrl = isDocker ? baseUrl.replace('localhost', 'host.docker.internal') : baseUrl;
+    baseUrl = isDocker ? baseUrl.replace('127.0.0.1', 'host.docker.internal') : baseUrl;

+    logger.debug('Ollama Base Url used: ', baseUrl);
+
     const ollamaInstance = ollama(model, {
       numCtx: DEFAULT_NUM_CTX,
     }) as LanguageModelV1 & { config: any };
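
The numCtx: DEFAULT_NUM_CTX option is what the DEFAULT_NUM_CTX env var forwarded by the Dockerfile ultimately feeds. A sketch of how such a constant is typically derived from the environment (the parse-and-fallback shape and the 32768 default are assumptions, not shown in this diff):

// Sketch: context-window size sourced from the env var the Dockerfile passes through.
const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX
  ? parseInt(process.env.DEFAULT_NUM_CTX, 10)
  : 32768; // assumed fallback value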
vite.config.ts

@@ -4,9 +4,11 @@ import { defineConfig, type ViteDevServer } from 'vite';
 import { nodePolyfills } from 'vite-plugin-node-polyfills';
 import { optimizeCssModules } from 'vite-plugin-optimize-css-modules';
 import tsconfigPaths from 'vite-tsconfig-paths';

+import * as dotenv from 'dotenv';
 import { execSync } from 'child_process';

+dotenv.config();
+
 // Get git hash with fallback
 const getGitHash = () => {
   try {
@@ -17,18 +19,21 @@ const getGitHash = () => {
 };


+
+
 export default defineConfig((config) => {
   return {
     define: {
       __COMMIT_HASH: JSON.stringify(getGitHash()),
       __APP_VERSION: JSON.stringify(process.env.npm_package_version),
+      // 'process.env': JSON.stringify(process.env)
     },
     build: {
       target: 'esnext',
     },
     plugins: [
       nodePolyfills({
-        include: ['path', 'buffer'],
+        include: ['path', 'buffer', 'process'],
       }),
       config.mode !== 'test' && remixCloudflareDevProxy(),
       remixVitePlugin({
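
Two small but load-bearing config changes here: dotenv.config() loads a local .env into process.env before the Vite config is evaluated, and adding 'process' to the nodePolyfills include list gives the browser bundle a process global, presumably so client code that references process does not hit a missing global. A sketch of the config-time effect (OLLAMA_API_BASE_URL is just an example key from the project's .env):

// Sketch: after dotenv.config(), .env values are visible while Vite
// evaluates this file, before any build or dev server starts.
import * as dotenv from 'dotenv';

dotenv.config();

console.log('Ollama base URL at config time:', process.env.OLLAMA_API_BASE_URL);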