import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import { createOpenAI } from '@ai-sdk/openai';
import type { LanguageModelV1 } from 'ai';

/**
 * Provider adapter for a locally running LM Studio server.
 *
 * LM Studio exposes an OpenAI-compatible REST API (default port 1234), so
 * models are discovered via `GET {baseUrl}/v1/models` and instantiated through
 * the `@ai-sdk/openai` client with an empty API key (local servers need none).
 */
export default class LMStudioProvider extends BaseProvider {
  name = 'LMStudio';
  getApiKeyLink = 'https://lmstudio.ai/';
  labelForGetApiKey = 'Get LMStudio';
  icon = 'i-ph:cloud-arrow-down';

  config = {
    baseUrlKey: 'LMSTUDIO_API_BASE_URL',
    baseUrl: 'http://localhost:1234/',
  };

  // All LM Studio models are discovered dynamically; nothing is hard-coded.
  staticModels: ModelInfo[] = [];

  /**
   * Queries the running LM Studio instance for its loaded models.
   *
   * Returns an empty list when no base URL is configured, the server is not
   * reachable, or it responds with a non-2xx status, so the caller's model
   * list degrades gracefully instead of rejecting.
   */
  async getDynamicModels(
    apiKeys?: Record<string, string>,
    settings?: IProviderSetting,
    serverEnv: Record<string, string> = {},
  ): Promise<ModelInfo[]> {
    const { baseUrl } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings: settings,
      serverEnv,
      defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
      defaultApiTokenKey: '',
    });

    if (!baseUrl) {
      return [];
    }

    try {
      const response = await fetch(`${baseUrl}/v1/models`);

      if (!response.ok) {
        // Server reachable but unhappy (e.g. still starting up) — treat as "no models".
        return [];
      }

      const data = (await response.json()) as { data: Array<{ id: string }> };

      return data.data.map((model) => ({
        name: model.id,
        label: model.id,
        provider: this.name,
        // NOTE(review): the /v1/models payload does not include context size
        // here; 8000 is a conservative default — confirm against LM Studio docs.
        maxTokenAllowed: 8000,
      }));
    } catch {
      // Local server not running; a connection refusal is an expected state.
      return [];
    }
  }

  /**
   * Builds a LanguageModelV1 handle for the given model id, pointed at the
   * configured LM Studio base URL.
   */
  getModelInstance: (options: {
    model: string;
    serverEnv: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }) => LanguageModelV1 = (options) => {
    const { apiKeys, providerSettings, serverEnv, model } = options;

    const { baseUrl } = this.getProviderBaseUrlAndKey({
      apiKeys,
      providerSettings,
      serverEnv: serverEnv as any,
      // BUG FIX: was 'OLLAMA_API_BASE_URL' (copy-paste from the Ollama
      // provider), which made the LMSTUDIO_API_BASE_URL env var ineffective
      // at inference time.
      defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
      defaultApiTokenKey: '',
    });

    const lmstudio = createOpenAI({
      // BUG FIX: the @ai-sdk/openai option is `baseURL` (capital URL).
      // The previous `baseUrl` key was silently ignored, so requests fell
      // back to the default OpenAI endpoint instead of the local server.
      baseURL: `${baseUrl}/v1`,
      apiKey: '',
    });

    return lmstudio(model);
  };
}