From b6ad8971ab79adfd6a4d60e2ed17752e1bdf9b55 Mon Sep 17 00:00:00 2001
From: Bruno Braga
Date: Wed, 1 Jan 2025 20:54:53 -0300
Subject: [PATCH 1/3] Create glhf.chat.ts

add glhf.chat service to providers
---
 app/lib/modules/llm/providers/glhf.chat.ts | 119 +++++++++++++++++++++
 1 file changed, 119 insertions(+)
 create mode 100644 app/lib/modules/llm/providers/glhf.chat.ts

diff --git a/app/lib/modules/llm/providers/glhf.chat.ts b/app/lib/modules/llm/providers/glhf.chat.ts
new file mode 100644
index 00000000..cdf5c737
--- /dev/null
+++ b/app/lib/modules/llm/providers/glhf.chat.ts
@@ -0,0 +1,119 @@
+import { BaseProvider, getOpenAILikeModel } from '~/lib/modules/llm/base-provider';
+import type { ModelInfo } from '~/lib/modules/llm/types';
+import type { IProviderSetting } from '~/types/model';
+import type { LanguageModelV1 } from 'ai';
+
+/**
+ * Provider for glhf.chat, an OpenAI-compatible API serving Hugging Face
+ * hosted models (model ids carry the `hf:` prefix).
+ */
+export default class GLHFProvider extends BaseProvider {
+  name = 'glhf.chat';
+  getApiKeyLink = 'https://glhf.chat/users/settings/api';
+
+  // Environment-variable names resolved by getProviderBaseUrlAndKey.
+  config = {
+    baseUrlKey: 'GLHF_API_BASE_URL',
+    apiTokenKey: 'GLHF_API_KEY',
+  };
+
+  // Used whenever no GLHF_API_BASE_URL override is configured.
+  defaultBaseUrl = 'https://glhf.chat/api/openai/v1';
+
+  get staticModels(): ModelInfo[] {
+    return [
+      {
+        name: 'hf:mistralai/Mistral-7B-Instruct-v0.3',
+        label: 'Mistral-7B-Instruct',
+        provider: this.name,
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'hf:qwen/Qwen2.5-Coder-32B-Instruct',
+        label: 'Qwen2.5-Coder-32B',
+        provider: this.name,
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'hf:deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct',
+        label: 'DeepSeek-Coder-V2-Lite',
+        provider: this.name,
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'hf:nvidia/Llama-3.1-Nemotron-70B-Instruct-HF',
+        label: 'Llama-3.1-Nemotron-70B',
+        provider: this.name,
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'hf:google/codegemma-7b-it',
+        label: 'CodeGemma-7B',
+        provider: this.name,
+        maxTokenAllowed: 8000,
+      },
+    ];
+  }
+
+  async getDynamicModels(
+    apiKeys?: Record<string, string>,
+    settings?: IProviderSetting,
+    serverEnv: Record<string, string> = {},
+  ): Promise<ModelInfo[]> {
+    // No dynamic model listing is queried for glhf.chat; return only the
+    // static models so entries are not duplicated in the picker.
+    return this.staticModels;
+  }
+
+  getModelInstance(options: {
+    model?: string;
+    serverEnv: Record<string, string>;
+    apiKeys?: Record<string, string>;
+    providerSettings?: Record<string, IProviderSetting>;
+  }): LanguageModelV1 {
+    const { model, serverEnv, apiKeys, providerSettings } = options;
+
+    const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+      apiKeys,
+      providerSettings: providerSettings?.[this.name],
+      serverEnv,
+      defaultBaseUrlKey: this.config.baseUrlKey,
+      defaultApiTokenKey: this.config.apiTokenKey,
+    });
+
+    const effectiveBaseUrl = baseUrl || this.defaultBaseUrl;
+
+    if (!apiKey) {
+      throw new Error(`Missing API key for ${this.name} provider`);
+    }
+
+    // Fall back to the first static model when none is specified.
+    const modelToUse = model || this.staticModels[0].name;
+
+    return getOpenAILikeModel(effectiveBaseUrl, apiKey, modelToUse);
+  }
+
+  /**
+   * Probes the `/models` endpoint to verify the configured API key works.
+   * Throws when the key is missing or the endpoint responds non-2xx.
+   */
+  async testApiConnection(): Promise<void> {
+    // NOTE(review): reads process.env directly — confirm this is available
+    // in the deployment target (e.g. not on Cloudflare Workers).
+    const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
+      serverEnv: process.env as Record<string, string>,
+      defaultBaseUrlKey: this.config.baseUrlKey,
+      defaultApiTokenKey: this.config.apiTokenKey,
+    });
+
+    const effectiveBaseUrl = baseUrl || this.defaultBaseUrl;
+
+    if (!apiKey) {
+      throw new Error('Missing API key for GLHF provider during connection test.');
+    }
+
+    try {
+      const response = await fetch(`${effectiveBaseUrl}/models`, {
+        headers: {
+          'Authorization': `Bearer ${apiKey}`,
+          'Content-Type': 'application/json',
+        },
+      });
+
+      if (!response.ok) {
+        throw new Error(`GLHF API connection failed: ${response.status} ${response.statusText}`);
+      }
+
+      console.log('GLHF API connection successful.');
+    } catch (error) {
+      console.error('Error during GLHF API connection test:', error);
+      throw error;
+    }
+  }
+}

From bf3e3b096cf76be5e9a8506303f0f01a0926b354 Mon Sep 17 00:00:00 2001
From: Bruno Braga
Date: Wed, 1 Jan 2025 20:56:10 -0300
Subject: [PATCH 2/3] Update registry.ts

add glhf.chat service to providers
---
 app/lib/modules/llm/registry.ts | 2 ++
 1 file changed,
 2 insertions(+)

diff --git a/app/lib/modules/llm/registry.ts b/app/lib/modules/llm/registry.ts
index c002eb88..41f8a1fa 100644
--- a/app/lib/modules/llm/registry.ts
+++ b/app/lib/modules/llm/registry.ts
@@ -9,6 +9,7 @@ import MistralProvider from './providers/mistral';
 import OllamaProvider from './providers/ollama';
 import OpenRouterProvider from './providers/open-router';
 import OpenAILikeProvider from './providers/openai-like';
+import glhfchatProvider from './providers/glhf.chat';
 import OpenAIProvider from './providers/openai';
 import PerplexityProvider from './providers/perplexity';
 import TogetherProvider from './providers/together';
@@ -26,6 +27,7 @@ export {
   MistralProvider,
   OllamaProvider,
   OpenAIProvider,
+  glhfchatProvider,
   OpenRouterProvider,
   OpenAILikeProvider,
   PerplexityProvider,

From b4788775ba47c5035dbb02d9b353701b1c108d5f Mon Sep 17 00:00:00 2001
From: Bruno Braga
Date: Wed, 1 Jan 2025 20:58:56 -0300
Subject: [PATCH 3/3] Update .env.example

add glhf.chat to bolt.diy
---
 .env.example | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.env.example b/.env.example
index 6f2f5f5a..1180e716 100644
--- a/.env.example
+++ b/.env.example
@@ -51,6 +51,10 @@ OPENAI_LIKE_API_KEY=
 # Get your Together API Key
 TOGETHER_API_KEY=

+# Get your glhf.chat API Key at https://glhf.chat/users/settings/api
+GLHF_API_KEY=
+GLHF_API_BASE_URL=https://glhf.chat/api/openai/v1
+
 # You only need this environment variable set if you want to use Hyperbolic models
 #Get your Hyperbolics API Key at https://app.hyperbolic.xyz/settings
 #baseURL="https://api.hyperbolic.xyz/v1/chat/completions"