diff --git a/app/lib/.server/llm/model.ts b/app/lib/.server/llm/model.ts
index 2ef8940..76a3711 100644
--- a/app/lib/.server/llm/model.ts
+++ b/app/lib/.server/llm/model.ts
@@ -10,9 +10,12 @@ import { ollama } from 'ollama-ai-provider';
 import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 import { createMistral } from '@ai-sdk/mistral';
 import { createCohere } from '@ai-sdk/cohere';
+import type { LanguageModelV1 } from 'ai';
 
 export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX ? parseInt(process.env.DEFAULT_NUM_CTX, 10) : 32768;
 
+type OptionalApiKey = string | undefined;
+
 export function getAnthropicModel(apiKey: OptionalApiKey, model: string) {
   const anthropic = createAnthropic({
     apiKey,
@@ -20,9 +23,6 @@ export function getAnthropicModel(apiKey: OptionalApiKey, model: string) {
 
   return anthropic(model);
 }
-
-type OptionalApiKey = string | undefined;
-
 export function getOpenAILikeModel(baseURL: string, apiKey: OptionalApiKey, model: string) {
   const openai = createOpenAI({
     baseURL,
@@ -85,7 +85,7 @@ export function getHuggingFaceModel(apiKey: OptionalApiKey, model: string) {
 export function getOllamaModel(baseURL: string, model: string) {
   const ollamaInstance = ollama(model, {
     numCtx: DEFAULT_NUM_CTX,
-  });
+  }) as LanguageModelV1 & { config: any };
 
   ollamaInstance.config.baseURL = `${baseURL}/api`;
 
diff --git a/app/lib/.server/llm/stream-text.ts b/app/lib/.server/llm/stream-text.ts
index b441f1c..86e285c 100644
--- a/app/lib/.server/llm/stream-text.ts
+++ b/app/lib/.server/llm/stream-text.ts
@@ -1,7 +1,6 @@
-/*
- * @ts-nocheck
- * Preventing TS checks with files presented in the video for a better presentation.
- */
+// eslint-disable-next-line @typescript-eslint/ban-ts-comment
+// @ts-nocheck – TODO: Provide proper types
+
 import { streamText as _streamText, convertToCoreMessages } from 'ai';
 import { getModel } from '~/lib/.server/llm/model';
 import { MAX_TOKENS } from './constants';
diff --git a/app/routes/api.chat.ts b/app/routes/api.chat.ts
index 0a6826f..ac35a22 100644
--- a/app/routes/api.chat.ts
+++ b/app/routes/api.chat.ts
@@ -1,7 +1,6 @@
-/*
- * @ts-nocheck
- * Preventing TS checks with files presented in the video for a better presentation.
- */
+// eslint-disable-next-line @typescript-eslint/ban-ts-comment
+// @ts-nocheck – TODO: Provide proper types
+
 import { type ActionFunctionArgs } from '@remix-run/cloudflare';
 import { MAX_RESPONSE_SEGMENTS, MAX_TOKENS } from '~/lib/.server/llm/constants';
 import { CONTINUE_PROMPT } from '~/lib/.server/llm/prompts';
diff --git a/app/utils/constants.ts b/app/utils/constants.ts
index 4e60d5f..de68a82 100644
--- a/app/utils/constants.ts
+++ b/app/utils/constants.ts
@@ -167,6 +167,48 @@ const PROVIDER_LIST: ProviderInfo[] = [
         provider: 'HuggingFace',
         maxTokenAllowed: 8000,
       },
+      {
+        name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
+        label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
+        provider: 'HuggingFace',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'Qwen/Qwen2.5-72B-Instruct',
+        label: 'Qwen2.5-72B-Instruct (HuggingFace)',
+        provider: 'HuggingFace',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'meta-llama/Llama-3.1-70B-Instruct',
+        label: 'Llama-3.1-70B-Instruct (HuggingFace)',
+        provider: 'HuggingFace',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'meta-llama/Llama-3.1-405B',
+        label: 'Llama-3.1-405B (HuggingFace)',
+        provider: 'HuggingFace',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: '01-ai/Yi-1.5-34B-Chat',
+        label: 'Yi-1.5-34B-Chat (HuggingFace)',
+        provider: 'HuggingFace',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'codellama/CodeLlama-34b-Instruct-hf',
+        label: 'CodeLlama-34b-Instruct (HuggingFace)',
+        provider: 'HuggingFace',
+        maxTokenAllowed: 8000,
+      },
+      {
+        name: 'NousResearch/Hermes-3-Llama-3.1-8B',
+        label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
+        provider: 'HuggingFace',
+        maxTokenAllowed: 8000,
+      },
     ],
     getApiKeyLink: 'https://huggingface.co/settings/tokens',
   },
diff --git a/app/utils/logger.ts b/app/utils/logger.ts
index 9b2c31c..1a5c932 100644
--- a/app/utils/logger.ts
+++ b/app/utils/logger.ts
@@ -11,7 +11,7 @@ interface Logger {
   setLevel: (level: DebugLevel) => void;
 }
 
-let currentLevel: DebugLevel = (import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV) ? 'debug' : 'info';
+let currentLevel: DebugLevel = import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV ? 'debug' : 'info';
 
 const isWorker = 'HTMLRewriter' in globalThis;
 const supportsColor = !isWorker;
diff --git a/package.json b/package.json
index 052c9cd..16c59f7 100644
--- a/package.json
+++ b/package.json
@@ -12,7 +12,7 @@
     "test": "vitest --run",
     "test:watch": "vitest",
     "lint": "eslint --cache --cache-location ./node_modules/.cache/eslint app",
-    "lint:fix": "pnpm run lint -- --fix",
+    "lint:fix": "npm run lint -- --fix && prettier app --write",
     "start": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings",
     "dockerstart": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings --ip 0.0.0.0 --port 5173 --no-show-interactive-dev-session",
     "dockerrun": "docker run -it -d --name bolt-ai-live -p 5173:5173 --env-file .env.local bolt-ai",