From 05146c18d6e3b5410100089fed99c30d60dccad7 Mon Sep 17 00:00:00 2001 From: Dustin Loring Date: Mon, 16 Dec 2024 18:40:04 -0500 Subject: [PATCH 01/17] fix: Prompt Enhance Prompt Enhance option stopped, this fixes it --- app/routes/api.enhancer.ts | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/app/routes/api.enhancer.ts b/app/routes/api.enhancer.ts index c2dbba4..c53d25a 100644 --- a/app/routes/api.enhancer.ts +++ b/app/routes/api.enhancer.ts @@ -114,14 +114,30 @@ async function enhancerAction({ context, request }: ActionFunctionArgs) { for (const line of lines) { try { - const parsed = JSON.parse(line); + // Handle token-based streaming format + if (line.includes('0:"')) { + // Extract all token contents and join them + const tokens = line.match(/0:"([^"]+)"/g) || []; + const content = tokens + .map(token => token.slice(3, -1)) // Remove the '0:"' prefix and '"' suffix + .join(''); + + if (content) { + controller.enqueue(encoder.encode(content)); + } + continue; + } + // Try to parse as JSON if it's not token-based format + const parsed = JSON.parse(line); if (parsed.type === 'text') { controller.enqueue(encoder.encode(parsed.value)); } } catch (e) { - // skip invalid JSON lines - console.warn('Failed to parse stream part:', line, e); + // If not JSON and not token-based, treat as plain text + if (!line.includes('e:') && !line.includes('d:')) { // Skip metadata lines + controller.enqueue(encoder.encode(line)); + } } } }, From 2d57bd27461c5d63d454f93fb17b001dfdf28211 Mon Sep 17 00:00:00 2001 From: Dustin Loring Date: Mon, 16 Dec 2024 18:55:38 -0500 Subject: [PATCH 02/17] add: charactors to the vaild list for chat titles added common punctuation to the charactersValid --- app/lib/hooks/useEditChatDescription.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/app/lib/hooks/useEditChatDescription.ts b/app/lib/hooks/useEditChatDescription.ts index da07f2c..5230d6c 100644 --- 
a/app/lib/hooks/useEditChatDescription.ts +++ b/app/lib/hooks/useEditChatDescription.ts @@ -92,7 +92,8 @@ export function useEditChatDescription({ } const lengthValid = trimmedDesc.length > 0 && trimmedDesc.length <= 100; - const characterValid = /^[a-zA-Z0-9\s]+$/.test(trimmedDesc); + // Allow letters, numbers, spaces, and common punctuation but exclude characters that could cause issues + const characterValid = /^[a-zA-Z0-9\s\-_.,!?()[\]{}'"]+$/.test(trimmedDesc); if (!lengthValid) { toast.error('Description must be between 1 and 100 characters.'); @@ -100,7 +101,7 @@ export function useEditChatDescription({ } if (!characterValid) { - toast.error('Description can only contain alphanumeric characters and spaces.'); + toast.error('Description can only contain letters, numbers, spaces, and basic punctuation.'); return false; } From defe73367e91f4c04f2f7185fc64cb94f0a7ddaa Mon Sep 17 00:00:00 2001 From: Dustin Loring Date: Tue, 17 Dec 2024 07:56:25 -0500 Subject: [PATCH 03/17] add: miniflare and wrangler error added information to FAQ in docs --- docs/docs/FAQ.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/docs/FAQ.md b/docs/docs/FAQ.md index de1b4b1..9f18a88 100644 --- a/docs/docs/FAQ.md +++ b/docs/docs/FAQ.md @@ -72,4 +72,9 @@ Local LLMs like Qwen-2.5-Coder are powerful for small applications but still exp --- +### **"Miniflare or Wrangler errors in Windows"** +You will need to make sure you have the latest version of Visual Studio C++ installed (14.40.33816), more information here https://github.com/stackblitz-labs/bolt.diy/issues/19. + +--- + Got more questions? Feel free to reach out or open an issue in our GitHub repo! 
From a2330084eb9c960dff0a58b7e45f501266fc12a1 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 17 Dec 2024 15:25:41 +0000 Subject: [PATCH 04/17] chore: update commit hash to 282beb96e2ee92ba8b1174aaaf9f270e03a288e8 --- app/commit.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/commit.json b/app/commit.json index 832678f..7dc5919 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "1e72d52278730f7d22448be9d5cf2daf12559486", "version": "0.0.2" } +{ "commit": "282beb96e2ee92ba8b1174aaaf9f270e03a288e8", "version": "0.0.2" } From 18d04ca065f4a3823779b905dbf3006373892a56 Mon Sep 17 00:00:00 2001 From: Anirban Kar Date: Wed, 18 Dec 2024 02:20:14 +0530 Subject: [PATCH 05/17] simplified the fix --- app/commit.json | 2 +- app/routes/api.enhancer.ts | 43 +------------------------------------- 2 files changed, 2 insertions(+), 43 deletions(-) diff --git a/app/commit.json b/app/commit.json index 832678f..f7222f2 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "1e72d52278730f7d22448be9d5cf2daf12559486", "version": "0.0.2" } +{ "commit": "05146c18d6e3b5410100089fed99c30d60dccad7" } diff --git a/app/routes/api.enhancer.ts b/app/routes/api.enhancer.ts index c53d25a..2b8fee8 100644 --- a/app/routes/api.enhancer.ts +++ b/app/routes/api.enhancer.ts @@ -5,9 +5,6 @@ import { streamText } from '~/lib/.server/llm/stream-text'; import { stripIndents } from '~/utils/stripIndent'; import type { IProviderSetting, ProviderInfo } from '~/types/model'; -const encoder = new TextEncoder(); -const decoder = new TextDecoder(); - export async function action(args: ActionFunctionArgs) { return enhancerAction(args); } @@ -107,45 +104,7 @@ async function enhancerAction({ context, request }: ActionFunctionArgs) { providerSettings, }); - const transformStream = new TransformStream({ - transform(chunk, controller) { - const text = decoder.decode(chunk); - const lines = text.split('\n').filter((line) => line.trim() 
!== ''); - - for (const line of lines) { - try { - // Handle token-based streaming format - if (line.includes('0:"')) { - // Extract all token contents and join them - const tokens = line.match(/0:"([^"]+)"/g) || []; - const content = tokens - .map(token => token.slice(3, -1)) // Remove the '0:"' prefix and '"' suffix - .join(''); - - if (content) { - controller.enqueue(encoder.encode(content)); - } - continue; - } - - // Try to parse as JSON if it's not token-based format - const parsed = JSON.parse(line); - if (parsed.type === 'text') { - controller.enqueue(encoder.encode(parsed.value)); - } - } catch (e) { - // If not JSON and not token-based, treat as plain text - if (!line.includes('e:') && !line.includes('d:')) { // Skip metadata lines - controller.enqueue(encoder.encode(line)); - } - } - } - }, - }); - - const transformedStream = result.toDataStream().pipeThrough(transformStream); - - return new Response(transformedStream, { + return new Response(result.textStream, { status: 200, headers: { 'Content-Type': 'text/plain; charset=utf-8', From fce8999f27c0affbc762dc90de992b5a759ab325 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 17 Dec 2024 21:00:04 +0000 Subject: [PATCH 06/17] chore: release version 0.0.3 --- app/commit.json | 2 +- changelog.md | 266 +++--------------------------------------------- package.json | 2 +- 3 files changed, 15 insertions(+), 255 deletions(-) diff --git a/app/commit.json b/app/commit.json index 7dc5919..b9c669a 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "282beb96e2ee92ba8b1174aaaf9f270e03a288e8", "version": "0.0.2" } +{ "commit": "eb6d4353565be31c6e20bfca2c5aea29e4f45b6d", "version": "0.0.3" } diff --git a/changelog.md b/changelog.md index ad934ea..9c4d50f 100644 --- a/changelog.md +++ b/changelog.md @@ -1,271 +1,31 @@ -# Release v0.0.2 - -### πŸ”„ Changes since v0.0.1 - -#### ✨ Features - -- add unit configuration to uno.config.ts -- added perplexity model -- Experimental Prompt 
Library Added -- start update by branch +# Release v0.0.3 +### πŸ”„ Changes since v0.0.2 #### πŸ› Bug Fixes -- added more controlled rate for code streaming -- handle conflicts between input method engine and enter key -- LM Studio sending messgae -- adjust intro section margin and textarea outline style in BaseChat component -- commit-file-ignore -- lm studio fix -- start new chat icon -- removed context optimization temporarily, to be moved to optional from menu +- Prompt Enhance -#### ♻️ Code Refactoring +#### πŸ“š Documentation -- remove unused React import in ImportButtons component -- simplify GitCloneButton component by removing unused tooltip and streamlining button structure +- miniflare error knowledge #### πŸ”§ Chores -- update commit hash to 7bafd2a5d67dce70d15b77201ef8de9745efab61 -- update commit hash to e5ecb0b7d5e0fb53f13654689cebd8eb99b10578 -- update commit hash to 8f15c81f37f36667fe796b1f75d0003a7c0f395b -- update commit hash to d13da30bda2d10eb2da42113493625cd55e0d34d -- update commit hash to dd296ab00d4d51ea0bc30ebe9aed0e6632feb37a -- update commit hash to eeafc12522b184dcbded28c5c6606e4a23e6849f -- update commit hash to d479daa5781a533c68a6f9ffdb3b919914c9305e -- update commit hash to 5773b1e271c8effec20ff1c10a759d9a654a2a94 -- update commit hash to 5f3405151043b3c32da7acc6353247a5508969b3 -- update commit hash to 5f3405151043b3c32da7acc6353247a5508969b3 -- update commit hash to 0c899e430a4d33e78e3e44ebf7100b5da14eda3f -- update commit hash to 1d64a15ed0110fc62091b1dca90139de9fb9fdb4 -- update commit hash to d1fa70fc97dc7839ea8cd005feb03266f201cf4f -- update commit hash to 1e04ab38b07e82852626b164890f4a6df1f98cef -- update commit hash to 8c4397a19f3eab2382082a39526d66385e9d2a49 -- update commit hash to 55094392cf4c5bc607aff796680ad50236a4cf20 -- update commit hash to 9666b2ab67d25345542722ab9d870b36ad06252e -- update commit hash to 6a5ed21c0fed92a8c842b683bf9a430901e6bb05 -- update commit hash to 4af18c069f2429ffaf410d92702a1e1294af2628 -- 
update commit hash to a71cfba660f04a8440960ab772670b192e2d066f -- update commit hash to 4f02887565e13eeaabbfb6f699cbe089e802338f -- update commit hash to f27f7bba5132346db18e70e514a6a6202d6ab634 -- update commit hash to eb53146937cbe49a84aaaaa59882df6db4a1e459 -- update commit hash to 4f10fb1294e11cf8f5a68b30a1e85acdf65ffcbc -- update commit hash to 43370f515df1184be2fb54db637a73bb683d6d86 -- update commit hash to ece0213500a94a6b29e29512c5040baf57884014 -- update commit hash to b06f6e3a3e7e5b2b5f8d9b13a761422993559f3e -- update commit hash to 25fe15232fcd6cee83f179adbd1d3e7d6a90acca -- update commit hash to a87cfd79503a62db2be00656f4874ec747d76a09 -- update commit hash to 7c3a3bbde6c61f374a6d37c888c6900a335e3d33 -- update commit hash to d936c012bdeb210ee876be1941ef8e370ea0b2e3 -- update commit hash to b3f7a5c3785060c7937dcd681b38f17b5396fc84 -- update commit hash to 23346f6271bf2f438489660357e6ffee803befb1 -- update commit hash to 9cd9ee9088467882e1e4efdf491959619307cc9d -- update commit hash to 87a90718d31bd8ec501cb32f863efd26156fb1e2 -- update commit hash to e223e9b6af1f6f31300fd7ed9ce498236cedd5dc -- update commit hash to 4016f54933102bf67336b8ae58e14673dfad72ee -- update commit hash to 1e7c3a4ff8f3153f53e0b0ed7cb13434825e41d9 -- update commit hash to d75899d737243cd7303704adef16d77290de5a0b -- update commit hash to b5867835f5da5c93bd9a8376df9e9d32b97acff5 -- update commit hash to d22b32ae636b9f134cdb5f96a10e4398aa2171b7 -- update commit hash to d9b2801434011b60dca700c19cabd0652f31f8e4 -- update commit hash to 0157fddc76fd5eebc545085e2c3c4ab37d9ca925 -- update commit hash to 810cc81a16955eebec943f7d504749dbcbb85b25 -- update commit hash to d3727459aa594505efd0cef58c4218eaf48d5baf -- update commit hash to 6ba93974a02a98c83badf2f0002ff4812b8f75a9 -- update commit hash to 960f532f8234663d0b3630d18033c959fac6882c -- update commit hash to 77073a5e7f759ae8e5752628131d0c56df6b5c34 -- update commit hash to 78505ed2f347dd3a7778b4c1c7c38c89ecacedd3 -- update commit hash 
to f752bf7da532ec6196dafff1c388250d44db4de5 -- update commit hash to 995fb81ac7a03eb1a6d1c56cf2fc92a60028c024 -- update commit hash to 8aee6ebf477c08d896b4419fbdeb670cc2bb8f29 -- update commit hash to 6987ceae9e1e91bec301f9e25ed9e8e03449d806 -- update commit hash to eb1d5417e77e699e0489f09814e87fb5afed9dd5 -- update commit hash to de2cb43d170033c43a6cf436af02e033f66a7e4d -- update commit hash to 49b02dd885919e24a201f07b1a7b0fd0371b4f85 -- update commit hash to 43e1f436f57fc4adb43b5481b403967803d4786d -- update commit hash to 0a4ef117ae5d3687b04415e64a22794ea55841d1 -- update commit hash to 25b80ab267541b6ea290985dde09863f1a29c85c -- update commit hash to c257129a61e258650b321c19323ddebaf03b0a54 +- adding back semantic pull pr check for better changelog system +- update commit hash to 1e72d52278730f7d22448be9d5cf2daf12559486 +- update commit hash to 282beb96e2ee92ba8b1174aaaf9f270e03a288e8 #### πŸ” Other Changes -- Check the render method of SlotClone. #432 -- Initial commit for screen cap feature -- Second commit for screen cap feature -- Add 90b llama-3.2 model for better performance -- More selection tool changes -- feat(context optimization):improved context management and redused chat overhead -- added backdrop and loading screen -- basic context menu for folders -- copyPath and copyRelativePath for files and folders -- pnpm lock file -- Refactor to use newver v4 version of Vercel AI package -- removed console logs -- Update README.md -- Update README.md -- Update README.md -- Update README.md -- Merge branch 'main' into context-optimization -- Merge branch 'main' into context-optimization -- added prompt url params -- added support for private github repo through github connections -- Add Logo icons LLM's -- Settings UI enhancement -- Event logs bug fix -- Merge branch 'stackblitz-labs:main' into main -- auto select model on provider disabled -- Update debug tab to check against fork -- debug fixes -- minor bug fixes -- Merge branch 'main' of 
https://github.com/stackblitz-labs/bolt.diy -- Update commit.json -- Merge branch 'main' of https://github.com/Stijnus/bolt.new-any-llm -- Update commit.json -- Merge pull request #684 from thecodacus/fix-auto-select-model -- ui styles fixed -- Update README.md -- some clean up and added a all log option -- Merge remote-tracking branch 'github-desktop-stijnus/main' into pr/676 -- update README.md -- Merge branch 'main' into main -- Merge pull request #676 from Stijnus/main -- Update .gitignore -- Update commit.json -- Merge branch 'main' into fix/start-new-chat-icon -- Merge branch 'main' into fix/ui-enhancements -- Merge pull request #708 from SujalXplores/fix/ui-enhancements -- Update constants.ts -- Merge pull request #578 from thecodacus/context-optimization -- Merge pull request #713 from thecodacus/context-optimization-fix -- merged main -- Merge branch 'main' into feat/image-select-merge -- merge main into image -- Merge pull request #670 from thecodacus/private-github-repo -- Merge branch 'main' into streaming-fixed -- Merge pull request #655 from thecodacus/streaming-fixed -- Update BaseChat.tsx -- Merge pull request #679 from Dlouxgit/main -- Merge branch 'main' into feat/image-select -- merge main -- groq-llama3.3-70b -- Merge branch 'main' into feat/image-select -- Merge pull request #582 from emcconnell/feat/image-select -- update readme -- update readme -- Merge branch 'main' into update-readme -- Merge pull request #722 from emcconnell/update-readme -- Groq Llama 3.2 90B Vision Preview -- Merge -- Setting Modal Changes -- Renamed feature -- combined optional features -- Update DebugTab.tsx -- Update DebugTab.tsx -- Branding updates -- Update DebugTab.tsx -- prompt enhanced toast notification -- Merge branch 'main' into perplexity-models -- Merge pull request #715 from meetpateltech/perplexity-models -- Merge pull request #602 from mark-when/contextMenu2 -- Merge pull request #728 from dustinwloring1988/branding/Change-Bolt-to-bolt -- Setting-Menu -- 
prompt-enhanced-toast -- Merge pull request #726 from dustinwloring1988/ui-ux/features-tab -- fallback icon for provider -- fix-perplexity-icon -- Update README.md -- updated readme -- updated readme -- Perplexity Provider Icon -- perplexity-provider-icon -- README-formatting -- Merge branch 'main' into system-prompt-variations -- update by branch -- Merge branch 'main' into ui-ux/debug-tab -- updated the examples and added strict rules -- Merge branch 'main' into system-prompt-variations -- Update commit.yaml -- Update commit.yaml -- branding update -- updated to use settings for branch selection -- Update useSettings.tsx -- quick fix -- Update FAQ.md -- Update CONTRIBUTING.md -- quick fix -- update-Bolt-to-bolt -- debug-tab -- Update mkdocs.yml -- Update vite.config.ts -- added auto detect branch name and version tag -- Update constants.ts -- Update DebugTab.tsx -- a fav.ico -- favicon-ico -- fix -- Merge pull request #753 from dustinwloring1988/fix/lm-studio-fetch-warning -- Merge pull request #751 from dustinwloring1988/fix/v3_lazyRouteDiscovery-warn -- mkdoc-update-names -- mkdoc consistent style -- Merge branch 'main' into system-prompt-variations-local -- Update ConnectionsTab.tsx -- quick fix -- mkdoc-docs-styled -- new section heading -- new section heading -- Make links clickable in docs -- Update CONTRIBUTING.md -- fix clickable links docs -- default provider icon -- default-provider-image -- Another attempt to add toek usage info -- merge -- Lint fix -- updated implementation -- Merge branch 'main' into fix-variable-name -- Merge pull request #755 from thecodacus/fix-variable-name -- Merge branch 'main' into token-usage -- Merge pull request #769 from thecodacus/token-usage - Merge remote-tracking branch 'upstream/main' -- Merge remote-tracking branch 'origin/main' into system-prompt-variations-local -- Merge branch 'main' into main -- added missing icons for safari -- Merge pull request #760 from Stijnus/main -- Merge branch 'main' into 
app-fail-safari-fix -- Merge pull request #771 from thecodacus/app-fail-safari-fix -- Merge pull request #433 from DiegoSouzaPW/feature/SlotCloneError -- Merge remote-tracking branch 'upstream/main' -- commit workflow fix -- Merge pull request #772 from thecodacus/commit-workflow-fix -- Merge remote-tracking branch 'upstream/main' -- Merge branch 'main' into system-prompt-variations-local -- Merge pull request #744 from thecodacus/system-prompt-variations -- Merge remote-tracking branch 'upstream/main' -- updated workflow for commit and stable release -- Merge pull request #773 from thecodacus/workflowfix -- Fixed theming of Copy Code button -- Merge branch 'main' into copyMyFix -- Merge remote-tracking branch 'upstream/main' -- minor bugfix -- Merge branch 'minor-bugfix' into bugfix-for-stable -- Merge branch 'main' into prompt-url-params -- Merge pull request #669 from thecodacus/prompt-url-params -- Merge branch 'main' into add-loading-on-git-import-from-url -- added UI fix for loading screen -- Merge branch 'main' into add-loading-on-git-import-from-url -- Merge pull request #597 from thecodacus/add-loading-on-git-import-from-url -- Merge branch 'main' into copyMyFix -- Merge pull request #774 from D-Byte/copyMyFix -- Merge remote-tracking branch 'upstream/main' -- Merge branch 'main' into bugfix-for-stable -- Merge pull request #757 from dustinwloring1988/feat/enhanced-github-connection -- Merge remote-tracking branch 'upstream/main' -- Merge branch 'main' into bugfix-for-stable +- Merge pull request #781 from thecodacus/semantic-pull-pr +- miniflare and wrangler error +- simplified the fix +- Merge branch 'main' into fix/prompt-enhance -**Full Changelog**: [`v0.0.1..v0.0.2`](https://github.com/stackblitz-labs/bolt.diy/compare/v0.0.1...v0.0.2) +**Full Changelog**: [`v0.0.2..v0.0.3`](https://github.com/stackblitz-labs/bolt.diy/compare/v0.0.2...v0.0.3) diff --git a/package.json b/package.json index 8456734..05d483b 100644 --- a/package.json +++ b/package.json @@ 
-5,7 +5,7 @@ "license": "MIT", "sideEffects": false, "type": "module", - "version": "0.0.2", + "version": "0.0.3", "scripts": { "deploy": "npm run build && wrangler pages deploy", "build": "remix vite:build", From 6003c165ef9347d5a9f97ea711f84d37d65b5c0c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Tue, 17 Dec 2024 22:12:17 +0000 Subject: [PATCH 07/17] chore: update commit hash to e064803955604198c6aac7b257efd0ad8503cb73 --- app/commit.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/commit.json b/app/commit.json index b9c669a..7ff7537 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "eb6d4353565be31c6e20bfca2c5aea29e4f45b6d", "version": "0.0.3" } +{ "commit": "e064803955604198c6aac7b257efd0ad8503cb73", "version": "0.0.3" } From 62ebfe51a69788229aa62d34afb4ce89b7cd8ac8 Mon Sep 17 00:00:00 2001 From: Anirban Kar Date: Wed, 18 Dec 2024 16:34:18 +0530 Subject: [PATCH 08/17] fix: .env file baseUrl Issue --- app/commit.json | 2 +- app/components/chat/BaseChat.tsx | 6 +- .../settings/providers/ProvidersTab.tsx | 7 +- app/entry.server.tsx | 2 +- app/lib/.server/llm/api-key.ts | 25 +++- app/lib/.server/llm/model.ts | 11 +- app/lib/.server/llm/stream-text.ts | 9 +- app/types/model.ts | 6 +- app/utils/constants.ts | 124 ++++++++++++++---- 9 files changed, 149 insertions(+), 43 deletions(-) diff --git a/app/commit.json b/app/commit.json index b9c669a..4ff5294 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "eb6d4353565be31c6e20bfca2c5aea29e4f45b6d", "version": "0.0.3" } +{ "commit": "fce8999f27c0affbc762dc90de992b5a759ab325" } diff --git a/app/components/chat/BaseChat.tsx b/app/components/chat/BaseChat.tsx index 2084cbb..5db6653 100644 --- a/app/components/chat/BaseChat.tsx +++ b/app/components/chat/BaseChat.tsx @@ -119,6 +119,9 @@ export const BaseChat = React.forwardRef( useEffect(() => { // Load API keys from cookies on component mount + + let parsedApiKeys: Record | undefined = 
{}; + try { const storedApiKeys = Cookies.get('apiKeys'); @@ -127,6 +130,7 @@ export const BaseChat = React.forwardRef( if (typeof parsedKeys === 'object' && parsedKeys !== null) { setApiKeys(parsedKeys); + parsedApiKeys = parsedKeys; } } } catch (error) { @@ -155,7 +159,7 @@ export const BaseChat = React.forwardRef( Cookies.remove('providers'); } - initializeModelList(providerSettings).then((modelList) => { + initializeModelList({ apiKeys: parsedApiKeys, providerSettings }).then((modelList) => { setModelList(modelList); }); diff --git a/app/components/settings/providers/ProvidersTab.tsx b/app/components/settings/providers/ProvidersTab.tsx index 281b4c8..49a16f6 100644 --- a/app/components/settings/providers/ProvidersTab.tsx +++ b/app/components/settings/providers/ProvidersTab.tsx @@ -87,7 +87,12 @@ export default function ProvidersTab() { type="text" value={provider.settings.baseUrl || ''} onChange={(e) => { - const newBaseUrl = e.target.value; + let newBaseUrl: string | undefined = e.target.value; + + if (newBaseUrl && newBaseUrl.trim().length === 0) { + newBaseUrl = undefined; + } + updateProviderSettings(provider.name, { ...provider.settings, baseUrl: newBaseUrl }); logStore.logProvider(`Base URL updated for ${provider.name}`, { provider: provider.name, diff --git a/app/entry.server.tsx b/app/entry.server.tsx index a44917f..5e92d21 100644 --- a/app/entry.server.tsx +++ b/app/entry.server.tsx @@ -14,7 +14,7 @@ export default async function handleRequest( remixContext: EntryContext, _loadContext: AppLoadContext, ) { - await initializeModelList(); + await initializeModelList({}); const readable = await renderToReadableStream(, { signal: request.signal, diff --git a/app/lib/.server/llm/api-key.ts b/app/lib/.server/llm/api-key.ts index e82d08e..d21f070 100644 --- a/app/lib/.server/llm/api-key.ts +++ b/app/lib/.server/llm/api-key.ts @@ -3,6 +3,7 @@ * Preventing TS checks with files presented in the video for a better presentation. 
*/ import { env } from 'node:process'; +import type { IProviderSetting } from '~/types/model'; export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Record) { /** @@ -50,16 +51,30 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re } } -export function getBaseURL(cloudflareEnv: Env, provider: string) { +export function getBaseURL(cloudflareEnv: Env, provider: string, providerSettings?: Record) { + let settingBaseUrl = providerSettings?.[provider].baseUrl; + + if (settingBaseUrl && settingBaseUrl.length == 0) { + settingBaseUrl = undefined; + } + switch (provider) { case 'Together': - return env.TOGETHER_API_BASE_URL || cloudflareEnv.TOGETHER_API_BASE_URL || 'https://api.together.xyz/v1'; + return ( + settingBaseUrl || + env.TOGETHER_API_BASE_URL || + cloudflareEnv.TOGETHER_API_BASE_URL || + 'https://api.together.xyz/v1' + ); case 'OpenAILike': - return env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL; + return settingBaseUrl || env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL; case 'LMStudio': - return env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || 'http://localhost:1234'; + return ( + settingBaseUrl || env.LMSTUDIO_API_BASE_URL || cloudflareEnv.LMSTUDIO_API_BASE_URL || 'http://localhost:1234' + ); case 'Ollama': { - let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || 'http://localhost:11434'; + let baseUrl = + settingBaseUrl || env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || 'http://localhost:11434'; if (env.RUNNING_IN_DOCKER === 'true') { baseUrl = baseUrl.replace('localhost', 'host.docker.internal'); diff --git a/app/lib/.server/llm/model.ts b/app/lib/.server/llm/model.ts index 1a5aab7..1feb499 100644 --- a/app/lib/.server/llm/model.ts +++ b/app/lib/.server/llm/model.ts @@ -84,6 +84,8 @@ export function getHuggingFaceModel(apiKey: OptionalApiKey, model: string) { } export function 
getOllamaModel(baseURL: string, model: string) { + console.log({ baseURL, model }); + const ollamaInstance = ollama(model, { numCtx: DEFAULT_NUM_CTX, }) as LanguageModelV1 & { config: any }; @@ -140,7 +142,7 @@ export function getPerplexityModel(apiKey: OptionalApiKey, model: string) { export function getModel( provider: string, model: string, - env: Env, + serverEnv: Env, apiKeys?: Record, providerSettings?: Record, ) { @@ -148,9 +150,12 @@ export function getModel( * let apiKey; // Declare first * let baseURL; */ + // console.log({provider,model}); - const apiKey = getAPIKey(env, provider, apiKeys); // Then assign - const baseURL = providerSettings?.[provider].baseUrl || getBaseURL(env, provider); + const apiKey = getAPIKey(serverEnv, provider, apiKeys); // Then assign + const baseURL = getBaseURL(serverEnv, provider, providerSettings); + + // console.log({apiKey,baseURL}); switch (provider) { case 'Anthropic': diff --git a/app/lib/.server/llm/stream-text.ts b/app/lib/.server/llm/stream-text.ts index 74cdd9d..6bbf568 100644 --- a/app/lib/.server/llm/stream-text.ts +++ b/app/lib/.server/llm/stream-text.ts @@ -151,10 +151,13 @@ export async function streamText(props: { providerSettings?: Record; promptId?: string; }) { - const { messages, env, options, apiKeys, files, providerSettings, promptId } = props; + const { messages, env: serverEnv, options, apiKeys, files, providerSettings, promptId } = props; + + // console.log({serverEnv}); + let currentModel = DEFAULT_MODEL; let currentProvider = DEFAULT_PROVIDER.name; - const MODEL_LIST = await getModelList(apiKeys || {}, providerSettings); + const MODEL_LIST = await getModelList({ apiKeys, providerSettings, serverEnv: serverEnv as any }); const processedMessages = messages.map((message) => { if (message.role === 'user') { const { model, provider, content } = extractPropertiesFromMessage(message); @@ -196,7 +199,7 @@ export async function streamText(props: { } return _streamText({ - model: getModel(currentProvider, 
currentModel, env, apiKeys, providerSettings) as any, + model: getModel(currentProvider, currentModel, serverEnv, apiKeys, providerSettings) as any, system: systemPrompt, maxTokens: dynamicMaxTokens, messages: convertToCoreMessages(processedMessages as any), diff --git a/app/types/model.ts b/app/types/model.ts index 3bfbfde..a747a3f 100644 --- a/app/types/model.ts +++ b/app/types/model.ts @@ -3,7 +3,11 @@ import type { ModelInfo } from '~/utils/types'; export type ProviderInfo = { staticModels: ModelInfo[]; name: string; - getDynamicModels?: (apiKeys?: Record, providerSettings?: IProviderSetting) => Promise; + getDynamicModels?: ( + apiKeys?: Record, + providerSettings?: IProviderSetting, + serverEnv?: Record, + ) => Promise; getApiKeyLink?: string; labelForGetApiKey?: string; icon?: string; diff --git a/app/utils/constants.ts b/app/utils/constants.ts index 6425995..6595d9c 100644 --- a/app/utils/constants.ts +++ b/app/utils/constants.ts @@ -220,7 +220,6 @@ const PROVIDER_LIST: ProviderInfo[] = [ ], getApiKeyLink: 'https://huggingface.co/settings/tokens', }, - { name: 'OpenAI', staticModels: [ @@ -325,26 +324,46 @@ const staticModels: ModelInfo[] = PROVIDER_LIST.map((p) => p.staticModels).flat( export let MODEL_LIST: ModelInfo[] = [...staticModels]; -export async function getModelList( - apiKeys: Record, - providerSettings?: Record, -) { +export async function getModelList(options: { + apiKeys?: Record; + providerSettings?: Record; + serverEnv?: Record; +}) { + const { apiKeys, providerSettings, serverEnv } = options; + + // console.log({ providerSettings, serverEnv,env:process.env }); MODEL_LIST = [ ...( await Promise.all( PROVIDER_LIST.filter( (p): p is ProviderInfo & { getDynamicModels: () => Promise } => !!p.getDynamicModels, - ).map((p) => p.getDynamicModels(apiKeys, providerSettings?.[p.name])), + ).map((p) => p.getDynamicModels(apiKeys, providerSettings?.[p.name], serverEnv)), ) ).flat(), ...staticModels, ]; + return MODEL_LIST; } -async function 
getTogetherModels(apiKeys?: Record, settings?: IProviderSetting): Promise { +async function getTogetherModels( + apiKeys?: Record, + settings?: IProviderSetting, + serverEnv: Record = {}, +): Promise { try { - const baseUrl = settings?.baseUrl || import.meta.env.TOGETHER_API_BASE_URL || ''; + let settingsBaseUrl = settings?.baseUrl; + + if (settingsBaseUrl && settingsBaseUrl.length == 0) { + settingsBaseUrl = undefined; + } + + const baseUrl = + settingsBaseUrl || + serverEnv?.TOGETHER_API_BASE_URL || + process.env.TOGETHER_API_BASE_URL || + import.meta.env.TOGETHER_API_BASE_URL || + ''; const provider = 'Together'; if (!baseUrl) { @@ -383,8 +402,19 @@ async function getTogetherModels(apiKeys?: Record, settings?: IP } } -const getOllamaBaseUrl = (settings?: IProviderSetting) => { - const defaultBaseUrl = settings?.baseUrl || import.meta.env.OLLAMA_API_BASE_URL || 'http://localhost:11434'; +const getOllamaBaseUrl = (settings?: IProviderSetting, serverEnv: Record = {}) => { + let settingsBaseUrl = settings?.baseUrl; + + if (settingsBaseUrl && settingsBaseUrl.length == 0) { + settingsBaseUrl = undefined; + } + + const defaultBaseUrl = + settings?.baseUrl || + serverEnv?.OLLAMA_API_BASE_URL || + process.env.OLLAMA_API_BASE_URL || + import.meta.env.OLLAMA_API_BASE_URL || + 'http://localhost:11434'; // Check if we're in the browser if (typeof window !== 'undefined') { @@ -398,9 +428,13 @@ const getOllamaBaseUrl = (settings?: IProviderSetting) => { return isDocker ? 
defaultBaseUrl.replace('localhost', 'host.docker.internal') : defaultBaseUrl; }; -async function getOllamaModels(apiKeys?: Record, settings?: IProviderSetting): Promise { +async function getOllamaModels( + apiKeys?: Record, + settings?: IProviderSetting, + serverEnv: Record = {}, +): Promise { try { - const baseUrl = getOllamaBaseUrl(settings); + const baseUrl = getOllamaBaseUrl(settings, serverEnv); const response = await fetch(`${baseUrl}/api/tags`); const data = (await response.json()) as OllamaApiResponse; @@ -421,9 +455,21 @@ async function getOllamaModels(apiKeys?: Record, settings?: IPro async function getOpenAILikeModels( apiKeys?: Record, settings?: IProviderSetting, + serverEnv: Record = {}, ): Promise { try { - const baseUrl = settings?.baseUrl || import.meta.env.OPENAI_LIKE_API_BASE_URL || ''; + let settingsBaseUrl = settings?.baseUrl; + + if (settingsBaseUrl && settingsBaseUrl.length == 0) { + settingsBaseUrl = undefined; + } + + const baseUrl = + settingsBaseUrl || + serverEnv.OPENAI_LIKE_API_BASE_URL || + process.env.OPENAI_LIKE_API_BASE_URL || + import.meta.env.OPENAI_LIKE_API_BASE_URL || + ''; if (!baseUrl) { return []; @@ -486,9 +532,24 @@ async function getOpenRouterModels(): Promise { })); } -async function getLMStudioModels(_apiKeys?: Record, settings?: IProviderSetting): Promise { +async function getLMStudioModels( + _apiKeys?: Record, + settings?: IProviderSetting, + serverEnv: Record = {}, +): Promise { try { - const baseUrl = settings?.baseUrl || import.meta.env.LMSTUDIO_API_BASE_URL || 'http://localhost:1234'; + let settingsBaseUrl = settings?.baseUrl; + + if (settingsBaseUrl && settingsBaseUrl.length == 0) { + settingsBaseUrl = undefined; + } + + const baseUrl = + settingsBaseUrl || + serverEnv.LMSTUDIO_API_BASE_URL || + process.env.LMSTUDIO_API_BASE_URL || + import.meta.env.LMSTUDIO_API_BASE_URL || + 'http://localhost:1234'; const response = await fetch(`${baseUrl}/v1/models`); const data = (await response.json()) as any; @@ -503,29 
+564,37 @@ async function getLMStudioModels(_apiKeys?: Record, settings?: I } } -async function initializeModelList(providerSettings?: Record): Promise { - let apiKeys: Record = {}; +async function initializeModelList(options: { + env?: Record; + providerSettings?: Record; + apiKeys?: Record; +}): Promise { + const { providerSettings, apiKeys: providedApiKeys, env } = options; + let apiKeys: Record = providedApiKeys || {}; - try { - const storedApiKeys = Cookies.get('apiKeys'); + if (!providedApiKeys) { + try { + const storedApiKeys = Cookies.get('apiKeys'); - if (storedApiKeys) { - const parsedKeys = JSON.parse(storedApiKeys); + if (storedApiKeys) { + const parsedKeys = JSON.parse(storedApiKeys); - if (typeof parsedKeys === 'object' && parsedKeys !== null) { - apiKeys = parsedKeys; + if (typeof parsedKeys === 'object' && parsedKeys !== null) { + apiKeys = parsedKeys; + } } + } catch (error: any) { + logStore.logError('Failed to fetch API keys from cookies', error); + logger.warn(`Failed to fetch apikeys from cookies: ${error?.message}`); } - } catch (error: any) { - logStore.logError('Failed to fetch API keys from cookies', error); - logger.warn(`Failed to fetch apikeys from cookies: ${error?.message}`); } + MODEL_LIST = [ ...( await Promise.all( PROVIDER_LIST.filter( (p): p is ProviderInfo & { getDynamicModels: () => Promise } => !!p.getDynamicModels, - ).map((p) => p.getDynamicModels(apiKeys, providerSettings?.[p.name])), + ).map((p) => p.getDynamicModels(apiKeys, providerSettings?.[p.name], env)), ) ).flat(), ...staticModels, @@ -534,6 +603,7 @@ async function initializeModelList(providerSettings?: Record Date: Wed, 18 Dec 2024 06:35:33 -0500 Subject: [PATCH 09/17] docs: simplified setup Removed the section about environment variables. In docker removed the production build. Both of these can still be found in the docs. 
--- README.md | 43 +++++++++---------------------------------- 1 file changed, 9 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index 84235f8..c7387ee 100644 --- a/README.md +++ b/README.md @@ -95,34 +95,6 @@ Clone the repository using Git: git clone -b stable https://github.com/stackblitz-labs/bolt.diy ``` -### (Optional) Configure Environment Variables - -Most environment variables can be configured directly through the settings menu of the application. However, if you need to manually configure them: - -1. Rename `.env.example` to `.env.local`. -2. Add your LLM API keys. For example: - -```env -GROQ_API_KEY=YOUR_GROQ_API_KEY -OPENAI_API_KEY=YOUR_OPENAI_API_KEY -ANTHROPIC_API_KEY=YOUR_ANTHROPIC_API_KEY -``` - -**Note**: Ollama does not require an API key as it runs locally. - -3. Optionally, set additional configurations: - -```env -# Debugging -VITE_LOG_LEVEL=debug - -# Ollama settings (example: 8K context, localhost port 11434) -OLLAMA_API_BASE_URL=http://localhost:11434 -DEFAULT_NUM_CTX=8192 -``` - -**Important**: Do not commit your `.env.local` file to version control. This file is already included in `.gitignore`. - --- ## Run the Application @@ -155,27 +127,30 @@ DEFAULT_NUM_CTX=8192 Use the provided NPM scripts: ```bash - npm run dockerbuild # Development build - npm run dockerbuild:prod # Production build + npm run dockerbuild ``` Alternatively, use Docker commands directly: ```bash - docker build . --target bolt-ai-development # Development build - docker build . --target bolt-ai-production # Production build + docker build . --target bolt-ai-development ``` 2. **Run the Container**: Use Docker Compose profiles to manage environments: ```bash - docker-compose --profile development up # Development - docker-compose --profile production up # Production + docker-compose --profile development up ``` - With the development profile, changes to your code will automatically reflect in the running container (hot reloading). 
--- +### Entering API Keys + +All of your API Keys can be configured directly in the application. Just select the provider you want from the dropdown and click the pencil icon to enter your API key. + +--- + ### Update Your Local Version to the Latest To keep your local version of bolt.diy up to date with the latest changes, follow these steps for your operating system: From a1e902fe7963fa2008c8b30f1cb8be9281146bb2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 Dec 2024 12:31:51 +0000 Subject: [PATCH 10/17] chore: update commit hash to a9309161e95a8ed015f2f71b622fb63afdb74877 --- app/commit.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/commit.json b/app/commit.json index 7ff7537..1386a05 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "e064803955604198c6aac7b257efd0ad8503cb73", "version": "0.0.3" } +{ "commit": "a9309161e95a8ed015f2f71b622fb63afdb74877", "version": "0.0.3" } From b3b4d74824f63853474bd8dc21754ee8a53f72c4 Mon Sep 17 00:00:00 2001 From: Cole Medin Date: Wed, 18 Dec 2024 07:01:58 -0600 Subject: [PATCH 11/17] chore: a few documentation enhancements for main README and FAQs --- FAQ.md | 32 ++++++++++++++++++++++++-------- README.md | 6 ++++-- docs/docs/FAQ.md | 20 ++++++++++++++++++++ 3 files changed, 48 insertions(+), 10 deletions(-) diff --git a/FAQ.md b/FAQ.md index ecd4158..dcf250d 100644 --- a/FAQ.md +++ b/FAQ.md @@ -2,6 +2,18 @@ # bolt.diy +## Recommended Models for bolt.diy + +For the best experience with bolt.diy, we recommend using the following models: + +- **Claude 3.5 Sonnet (old)**: Best overall coder, providing excellent results across all use cases + +- **Gemini 2.0 Flash**: Exceptional speed while maintaining good performance +- **GPT-4o**: Strong alternative to Claude 3.5 Sonnet with comparable capabilities +- **DeepSeekCoder V2 236b**: Best open source model (available through OpenRouter, DeepSeek API, or self-hosted) +- **Qwen 2.5 Coder 32b**: Best model for
self-hosting with reasonable hardware requirements + +**Note**: Models with less than 7b parameters typically lack the capability to properly interact with bolt! + ## FAQ ### How do I get the best results with bolt.diy? @@ -34,14 +46,18 @@ We have seen this error a couple times and for some reason just restarting the D We promise you that we are constantly testing new PRs coming into bolt.diy and the preview is core functionality, so the application is not broken! When you get a blank preview or don’t get a preview, this is generally because the LLM hallucinated bad code or incorrect commands. We are working on making this more transparent so it is obvious. Sometimes the error will appear in developer console too so check that as well. -### How to add a LLM: - -To make new LLMs available to use in this version of bolt.new, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider. - -By default, Anthropic, OpenAI, Groq, and Ollama are implemented as providers, but the YouTube video for this repo covers how to extend this to work with more providers if you wish! - -When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it. For Ollama models, make sure you have the model installed already before trying to use it here! - ### Everything works but the results are bad This goes to the point above about how local LLMs are getting very powerful but you still are going to see better (sometimes much better) results with the largest LLMs like GPT-4o, Claude 3.5 Sonnet, and DeepSeek Coder V2 236b. If you are using smaller LLMs like Qwen-2.5-Coder, consider it more experimental and educational at this point. 
It can build smaller applications really well, which is super impressive for a local LLM, but for larger scale applications you want to use the larger LLMs still! + +### Received structured exception #0xc0000005: access violation + +If you are getting this, you are probably on Windows. The fix is generally to update the [Visual C++ Redistributable](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170) + +### How to add an LLM: + +To make new LLMs available to use in this version of bolt.new, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider. + +By default, many providers are already implemented, but the YouTube video for this repo covers how to extend this to work with more providers if you wish! + +When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it. diff --git a/README.md b/README.md index c7387ee..7ad4abe 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,9 @@ Welcome to bolt.diy, the official open source version of Bolt.new (previously known as oTToDev and bolt.new ANY LLM), which allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models. -Check the [bolt.diy Docs](https://stackblitz-labs.github.io/bolt.diy/) for more information. This documentation is still being updated after the transfer. +Check the [bolt.diy Docs](https://stackblitz-labs.github.io/bolt.diy/) for more information. 
+ +We have also launched an experimental agent called the "bolt.diy Expert" that can answer common questions about bolt.diy. Find it here on the [oTTomator Live Agent Studio](https://studio.ottomator.ai/). bolt.diy was originally started by [Cole Medin](https://www.youtube.com/@ColeMedin) but has quickly grown into a massive community effort to build the BEST open source AI coding assistant! @@ -211,4 +213,4 @@ Explore upcoming features and priorities on our [Roadmap](https://roadmap.sh/r/o ## FAQ -For answers to common questions, visit our [FAQ Page](FAQ.md). +For answers to common questions, issues, and to see a list of recommended models, visit our [FAQ Page](FAQ.md). diff --git a/docs/docs/FAQ.md b/docs/docs/FAQ.md index 9f18a88..1b645d3 100644 --- a/docs/docs/FAQ.md +++ b/docs/docs/FAQ.md @@ -1,5 +1,19 @@ # Frequently Asked Questions (FAQ) +## What are the best models for bolt.diy? + +For the best experience with bolt.diy, we recommend using the following models: + +- **Claude 3.5 Sonnet (old)**: Best overall coder, providing excellent results across all use cases +- **Gemini 2.0 Flash**: Exceptional speed while maintaining good performance +- **GPT-4o**: Strong alternative to Claude 3.5 Sonnet with comparable capabilities +- **DeepSeekCoder V2 236b**: Best open source model (available through OpenRouter, DeepSeek API, or self-hosted) +- **Qwen 2.5 Coder 32b**: Best model for self-hosting with reasonable hardware requirements + +**Note**: Models with less than 7b parameters typically lack the capability to properly interact with bolt! + +--- + ## How do I get the best results with bolt.diy? - **Be specific about your stack**: @@ -72,6 +86,12 @@ Local LLMs like Qwen-2.5-Coder are powerful for small applications but still exp --- +### **"Received structured exception #0xc0000005: access violation"** + +If you are getting this, you are probably on Windows. 
The fix is generally to update the [Visual C++ Redistributable](https://learn.microsoft.com/en-us/cpp/windows/latest-supported-vc-redist?view=msvc-170) + +--- + ### **"Miniflare or Wrangler errors in Windows"** You will need to make sure you have the latest version of Visual Studio C++ installed (14.40.33816), more information here https://github.com/stackblitz-labs/bolt.diy/issues/19. From 283eb22ae57cfd2341fc5a2748f2f4b5d8107ef1 Mon Sep 17 00:00:00 2001 From: Anirban Kar Date: Wed, 18 Dec 2024 20:04:43 +0530 Subject: [PATCH 12/17] added indicator on settings menu --- app/commit.json | 2 +- app/components/settings/debug/DebugTab.tsx | 17 +- .../settings/providers/ProvidersTab.tsx | 119 ++++++----- app/lib/.server/llm/api-key.ts | 29 ++- app/types/model.ts | 1 + app/utils/constants.ts | 198 ++++++++++++------ vite.config.ts | 2 +- 7 files changed, 237 insertions(+), 131 deletions(-) diff --git a/app/commit.json b/app/commit.json index 4ff5294..b0a4c92 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "fce8999f27c0affbc762dc90de992b5a759ab325" } +{ "commit": "62ebfe51a69788229aa62d34afb4ce89b7cd8ac8" } diff --git a/app/components/settings/debug/DebugTab.tsx b/app/components/settings/debug/DebugTab.tsx index cf2341b..b0cde7d 100644 --- a/app/components/settings/debug/DebugTab.tsx +++ b/app/components/settings/debug/DebugTab.tsx @@ -2,6 +2,7 @@ import React, { useCallback, useEffect, useState } from 'react'; import { useSettings } from '~/lib/hooks/useSettings'; import commit from '~/commit.json'; import { toast } from 'react-toastify'; +import { providerBaseUrlEnvKeys } from '~/utils/constants'; interface ProviderStatus { name: string; @@ -236,7 +237,7 @@ const checkProviderStatus = async (url: string | null, providerName: string): Pr } // Try different endpoints based on provider - const checkUrls = [`${url}/api/health`, `${url}/v1/models`]; + const checkUrls = [`${url}/api/health`, url.endsWith('v1') ? 
`${url}/models` : `${url}/v1/models`]; console.log(`[Debug] Checking additional endpoints:`, checkUrls); const results = await Promise.all( @@ -321,14 +322,16 @@ export default function DebugTab() { .filter(([, provider]) => LOCAL_PROVIDERS.includes(provider.name)) .map(async ([, provider]) => { const envVarName = - provider.name.toLowerCase() === 'ollama' - ? 'OLLAMA_API_BASE_URL' - : provider.name.toLowerCase() === 'lmstudio' - ? 'LMSTUDIO_API_BASE_URL' - : `REACT_APP_${provider.name.toUpperCase()}_URL`; + providerBaseUrlEnvKeys[provider.name].baseUrlKey || `REACT_APP_${provider.name.toUpperCase()}_URL`; // Access environment variables through import.meta.env - const url = import.meta.env[envVarName] || provider.settings.baseUrl || null; // Ensure baseUrl is used + let settingsUrl = provider.settings.baseUrl; + + if (settingsUrl && settingsUrl.trim().length === 0) { + settingsUrl = undefined; + } + + const url = settingsUrl || import.meta.env[envVarName] || null; // Ensure baseUrl is used console.log(`[Debug] Using URL for ${provider.name}:`, url, `(from ${envVarName})`); const status = await checkProviderStatus(url, provider.name); diff --git a/app/components/settings/providers/ProvidersTab.tsx b/app/components/settings/providers/ProvidersTab.tsx index 49a16f6..20e66ef 100644 --- a/app/components/settings/providers/ProvidersTab.tsx +++ b/app/components/settings/providers/ProvidersTab.tsx @@ -7,6 +7,7 @@ import { logStore } from '~/lib/stores/logs'; // Import a default fallback icon import DefaultIcon from '/icons/Default.svg'; // Adjust the path as necessary +import { providerBaseUrlEnvKeys } from '~/utils/constants'; export default function ProvidersTab() { const { providers, updateProviderSettings, isLocalModel } = useSettings(); @@ -47,65 +48,77 @@ export default function ProvidersTab() { className="w-full bg-white dark:bg-bolt-elements-background-depth-4 relative px-2 py-1.5 rounded-md focus:outline-none placeholder-bolt-elements-textTertiary 
text-bolt-elements-textPrimary dark:text-bolt-elements-textPrimary border border-bolt-elements-borderColor" /> - {filteredProviders.map((provider) => ( -
-
-
- { - // Fallback to default icon on error - e.currentTarget.src = DefaultIcon; - }} - alt={`${provider.name} icon`} - className="w-6 h-6 dark:invert" - /> - {provider.name} -
- { - updateProviderSettings(provider.name, { ...provider.settings, enabled }); + {filteredProviders.map((provider) => { + const envBaseUrlKey = providerBaseUrlEnvKeys[provider.name].baseUrlKey; + const envBaseUrl = envBaseUrlKey ? import.meta.env[envBaseUrlKey] : undefined; - if (enabled) { - logStore.logProvider(`Provider ${provider.name} enabled`, { provider: provider.name }); - } else { - logStore.logProvider(`Provider ${provider.name} disabled`, { provider: provider.name }); - } - }} - /> -
- {/* Base URL input for configurable providers */} - {URL_CONFIGURABLE_PROVIDERS.includes(provider.name) && provider.settings.enabled && ( -
- - { - let newBaseUrl: string | undefined = e.target.value; + return ( +
+
+
+ { + // Fallback to default icon on error + e.currentTarget.src = DefaultIcon; + }} + alt={`${provider.name} icon`} + className="w-6 h-6 dark:invert" + /> + {provider.name} +
+ { + updateProviderSettings(provider.name, { ...provider.settings, enabled }); - if (newBaseUrl && newBaseUrl.trim().length === 0) { - newBaseUrl = undefined; + if (enabled) { + logStore.logProvider(`Provider ${provider.name} enabled`, { provider: provider.name }); + } else { + logStore.logProvider(`Provider ${provider.name} disabled`, { provider: provider.name }); } - - updateProviderSettings(provider.name, { ...provider.settings, baseUrl: newBaseUrl }); - logStore.logProvider(`Base URL updated for ${provider.name}`, { - provider: provider.name, - baseUrl: newBaseUrl, - }); }} - placeholder={`Enter ${provider.name} base URL`} - className="w-full bg-white dark:bg-bolt-elements-background-depth-4 relative px-2 py-1.5 rounded-md focus:outline-none placeholder-bolt-elements-textTertiary text-bolt-elements-textPrimary dark:text-bolt-elements-textPrimary border border-bolt-elements-borderColor" />
- )} -
- ))} + {/* Base URL input for configurable providers */} + {URL_CONFIGURABLE_PROVIDERS.includes(provider.name) && provider.settings.enabled && ( +
+ {envBaseUrl && ( + + )} + + { + let newBaseUrl: string | undefined = e.target.value; + + if (newBaseUrl && newBaseUrl.trim().length === 0) { + newBaseUrl = undefined; + } + + updateProviderSettings(provider.name, { ...provider.settings, baseUrl: newBaseUrl }); + logStore.logProvider(`Base URL updated for ${provider.name}`, { + provider: provider.name, + baseUrl: newBaseUrl, + }); + }} + placeholder={`Enter ${provider.name} base URL`} + className="w-full bg-white dark:bg-bolt-elements-background-depth-4 relative px-2 py-1.5 rounded-md focus:outline-none placeholder-bolt-elements-textTertiary text-bolt-elements-textPrimary dark:text-bolt-elements-textPrimary border border-bolt-elements-borderColor" + /> +
+ )} +
+ ); + })}
); } diff --git a/app/lib/.server/llm/api-key.ts b/app/lib/.server/llm/api-key.ts index d21f070..83b4646 100644 --- a/app/lib/.server/llm/api-key.ts +++ b/app/lib/.server/llm/api-key.ts @@ -4,6 +4,7 @@ */ import { env } from 'node:process'; import type { IProviderSetting } from '~/types/model'; +import { getProviderBaseUrlAndKey } from '~/utils/constants'; export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Record) { /** @@ -16,7 +17,20 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re return userApiKeys[provider]; } - // Fall back to environment variables + const { apiKey } = getProviderBaseUrlAndKey({ + provider, + apiKeys: userApiKeys, + providerSettings: undefined, + serverEnv: cloudflareEnv as any, + defaultBaseUrlKey: '', + defaultApiTokenKey: '', + }); + + if (apiKey) { + return apiKey; + } + + // Fall back to hardcoded environment variables names switch (provider) { case 'Anthropic': return env.ANTHROPIC_API_KEY || cloudflareEnv.ANTHROPIC_API_KEY; @@ -52,6 +66,19 @@ export function getAPIKey(cloudflareEnv: Env, provider: string, userApiKeys?: Re } export function getBaseURL(cloudflareEnv: Env, provider: string, providerSettings?: Record) { + const { baseUrl } = getProviderBaseUrlAndKey({ + provider, + apiKeys: {}, + providerSettings, + serverEnv: cloudflareEnv as any, + defaultBaseUrlKey: '', + defaultApiTokenKey: '', + }); + + if (baseUrl) { + return baseUrl; + } + let settingBaseUrl = providerSettings?.[provider].baseUrl; if (settingBaseUrl && settingBaseUrl.length == 0) { diff --git a/app/types/model.ts b/app/types/model.ts index a747a3f..b449363 100644 --- a/app/types/model.ts +++ b/app/types/model.ts @@ -4,6 +4,7 @@ export type ProviderInfo = { staticModels: ModelInfo[]; name: string; getDynamicModels?: ( + providerName: string, apiKeys?: Record, providerSettings?: IProviderSetting, serverEnv?: Record, diff --git a/app/utils/constants.ts b/app/utils/constants.ts index 6595d9c..c4eb0ae 100644 
--- a/app/utils/constants.ts +++ b/app/utils/constants.ts @@ -318,6 +318,83 @@ const PROVIDER_LIST: ProviderInfo[] = [ }, ]; +export const providerBaseUrlEnvKeys: Record = { + Anthropic: { + apiTokenKey: 'ANTHROPIC_API_KEY', + }, + OpenAI: { + apiTokenKey: 'OPENAI_API_KEY', + }, + Groq: { + apiTokenKey: 'GROQ_API_KEY', + }, + HuggingFace: { + apiTokenKey: 'HuggingFace_API_KEY', + }, + OpenRouter: { + apiTokenKey: 'OPEN_ROUTER_API_KEY', + }, + Google: { + apiTokenKey: 'GOOGLE_GENERATIVE_AI_API_KEY', + }, + OpenAILike: { + baseUrlKey: 'OPENAI_LIKE_API_BASE_URL', + apiTokenKey: 'OPENAI_LIKE_API_KEY', + }, + Together: { + baseUrlKey: 'TOGETHER_API_BASE_URL', + apiTokenKey: 'TOGETHER_API_KEY', + }, + Deepseek: { + apiTokenKey: 'DEEPSEEK_API_KEY', + }, + Mistral: { + apiTokenKey: 'MISTRAL_API_KEY', + }, + LMStudio: { + baseUrlKey: 'LMSTUDIO_API_BASE_URL', + }, + xAI: { + apiTokenKey: 'XAI_API_KEY', + }, + Cohere: { + apiTokenKey: 'COHERE_API_KEY', + }, + Perplexity: { + apiTokenKey: 'PERPLEXITY_API_KEY', + }, + Ollama: { + baseUrlKey: 'OLLAMA_API_BASE_URL', + }, +}; + +export const getProviderBaseUrlAndKey = (options: { + provider: string; + apiKeys?: Record; + providerSettings?: IProviderSetting; + serverEnv?: Record; + defaultBaseUrlKey: string; + defaultApiTokenKey: string; +}) => { + const { provider, apiKeys, providerSettings, serverEnv, defaultBaseUrlKey, defaultApiTokenKey } = options; + let settingsBaseUrl = providerSettings?.baseUrl; + + if (settingsBaseUrl && settingsBaseUrl.length == 0) { + settingsBaseUrl = undefined; + } + + const baseUrlKey = providerBaseUrlEnvKeys[provider]?.baseUrlKey || defaultBaseUrlKey; + const baseUrl = settingsBaseUrl || serverEnv?.[baseUrlKey] || process.env[baseUrlKey] || import.meta.env[baseUrlKey]; + + const apiTokenKey = providerBaseUrlEnvKeys[provider]?.apiTokenKey || defaultApiTokenKey; + const apiKey = + apiKeys?.[provider] || serverEnv?.[apiTokenKey] || process.env[apiTokenKey] || import.meta.env[apiTokenKey]; + + return { + 
baseUrl, + apiKey, + }; +}; export const DEFAULT_PROVIDER = PROVIDER_LIST[0]; const staticModels: ModelInfo[] = PROVIDER_LIST.map((p) => p.staticModels).flat(); @@ -337,7 +414,7 @@ export async function getModelList(options: { await Promise.all( PROVIDER_LIST.filter( (p): p is ProviderInfo & { getDynamicModels: () => Promise } => !!p.getDynamicModels, - ).map((p) => p.getDynamicModels(apiKeys, providerSettings?.[p.name], serverEnv)), + ).map((p) => p.getDynamicModels(p.name, apiKeys, providerSettings?.[p.name], serverEnv)), ) ).flat(), ...staticModels, @@ -347,35 +424,26 @@ export async function getModelList(options: { } async function getTogetherModels( + name: string, apiKeys?: Record, settings?: IProviderSetting, serverEnv: Record = {}, ): Promise { try { - let settingsBaseUrl = settings?.baseUrl; - - if (settingsBaseUrl && settingsBaseUrl.length == 0) { - settingsBaseUrl = undefined; - } - - const baseUrl = - settingsBaseUrl || - serverEnv?.TOGETHER_API_BASE_URL || - process.env.TOGETHER_API_BASE_URL || - import.meta.env.TOGETHER_API_BASE_URL || - ''; - const provider = 'Together'; + const { baseUrl, apiKey } = getProviderBaseUrlAndKey({ + provider: name, + apiKeys, + providerSettings: settings, + serverEnv, + defaultBaseUrlKey: 'TOGETHER_API_BASE_URL', + defaultApiTokenKey: 'TOGETHER_API_KEY', + }); + console.log({ baseUrl, apiKey }); if (!baseUrl) { return []; } - let apiKey = import.meta.env.OPENAI_LIKE_API_KEY ?? 
''; - - if (apiKeys && apiKeys[provider]) { - apiKey = apiKeys[provider]; - } - if (!apiKey) { return []; } @@ -393,7 +461,7 @@ async function getTogetherModels( label: `${m.display_name} - in:$${m.pricing.input.toFixed( 2, )} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`, - provider, + provider: name, maxTokenAllowed: 8000, })); } catch (e) { @@ -402,39 +470,40 @@ async function getTogetherModels( } } -const getOllamaBaseUrl = (settings?: IProviderSetting, serverEnv: Record = {}) => { - let settingsBaseUrl = settings?.baseUrl; - - if (settingsBaseUrl && settingsBaseUrl.length == 0) { - settingsBaseUrl = undefined; - } - - const defaultBaseUrl = - settings?.baseUrl || - serverEnv?.OLLAMA_API_BASE_URL || - process.env.OLLAMA_API_BASE_URL || - import.meta.env.OLLAMA_API_BASE_URL || - 'http://localhost:11434'; +const getOllamaBaseUrl = (name: string, settings?: IProviderSetting, serverEnv: Record = {}) => { + const { baseUrl } = getProviderBaseUrlAndKey({ + provider: name, + providerSettings: settings, + serverEnv, + defaultBaseUrlKey: 'OLLAMA_API_BASE_URL', + defaultApiTokenKey: '', + }); // Check if we're in the browser if (typeof window !== 'undefined') { // Frontend always uses localhost - return defaultBaseUrl; + return baseUrl; } // Backend: Check if we're running in Docker const isDocker = process.env.RUNNING_IN_DOCKER === 'true'; - return isDocker ? defaultBaseUrl.replace('localhost', 'host.docker.internal') : defaultBaseUrl; + return isDocker ? 
baseUrl.replace('localhost', 'host.docker.internal') : baseUrl; }; async function getOllamaModels( - apiKeys?: Record, + name: string, + _apiKeys?: Record, settings?: IProviderSetting, serverEnv: Record = {}, ): Promise { try { - const baseUrl = getOllamaBaseUrl(settings, serverEnv); + const baseUrl = getOllamaBaseUrl(name, settings, serverEnv); + + if (!baseUrl) { + return []; + } + const response = await fetch(`${baseUrl}/api/tags`); const data = (await response.json()) as OllamaApiResponse; @@ -453,34 +522,25 @@ async function getOllamaModels( } async function getOpenAILikeModels( + name: string, apiKeys?: Record, settings?: IProviderSetting, serverEnv: Record = {}, ): Promise { try { - let settingsBaseUrl = settings?.baseUrl; - - if (settingsBaseUrl && settingsBaseUrl.length == 0) { - settingsBaseUrl = undefined; - } - - const baseUrl = - settingsBaseUrl || - serverEnv.OPENAI_LIKE_API_BASE_URL || - process.env.OPENAI_LIKE_API_BASE_URL || - import.meta.env.OPENAI_LIKE_API_BASE_URL || - ''; + const { baseUrl, apiKey } = getProviderBaseUrlAndKey({ + provider: name, + apiKeys, + providerSettings: settings, + serverEnv, + defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL', + defaultApiTokenKey: 'OPENAI_LIKE_API_KEY', + }); if (!baseUrl) { return []; } - let apiKey = ''; - - if (apiKeys && apiKeys.OpenAILike) { - apiKey = apiKeys.OpenAILike; - } - const response = await fetch(`${baseUrl}/models`, { headers: { Authorization: `Bearer ${apiKey}`, @@ -491,7 +551,7 @@ async function getOpenAILikeModels( return res.data.map((model: any) => ({ name: model.id, label: model.id, - provider: 'OpenAILike', + provider: name, })); } catch (e) { console.error('Error getting OpenAILike models:', e); @@ -533,23 +593,25 @@ async function getOpenRouterModels(): Promise { } async function getLMStudioModels( - _apiKeys?: Record, + name: string, + apiKeys?: Record, settings?: IProviderSetting, serverEnv: Record = {}, ): Promise { try { - let settingsBaseUrl = settings?.baseUrl; + const { 
baseUrl } = getProviderBaseUrlAndKey({ + provider: name, + apiKeys, + providerSettings: settings, + serverEnv, + defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL', + defaultApiTokenKey: '', + }); - if (settingsBaseUrl && settingsBaseUrl.length == 0) { - settingsBaseUrl = undefined; + if (!baseUrl) { + return []; } - const baseUrl = - settingsBaseUrl || - serverEnv.LMSTUDIO_API_BASE_URL || - process.env.LMSTUDIO_API_BASE_URL || - import.meta.env.LMSTUDIO_API_BASE_URL || - 'http://localhost:1234'; const response = await fetch(`${baseUrl}/v1/models`); const data = (await response.json()) as any; @@ -594,7 +656,7 @@ async function initializeModelList(options: { await Promise.all( PROVIDER_LIST.filter( (p): p is ProviderInfo & { getDynamicModels: () => Promise } => !!p.getDynamicModels, - ).map((p) => p.getDynamicModels(apiKeys, providerSettings?.[p.name], env)), + ).map((p) => p.getDynamicModels(p.name, apiKeys, providerSettings?.[p.name], env)), ) ).flat(), ...staticModels, diff --git a/vite.config.ts b/vite.config.ts index f18b8b9..d96f704 100644 --- a/vite.config.ts +++ b/vite.config.ts @@ -28,7 +28,7 @@ export default defineConfig((config) => { chrome129IssuePlugin(), config.mode === 'production' && optimizeCssModules({ apply: 'build' }), ], - envPrefix: ["VITE_", "OPENAI_LIKE_API_", "OLLAMA_API_BASE_URL", "LMSTUDIO_API_BASE_URL","TOGETHER_API_BASE_URL"], + envPrefix: ["VITE_", "OPENAI_LIKE_API_", "OLLAMA_API_BASE_URL", "LMSTUDIO_API_BASE_URL","TOGETHER_API_"], css: { preprocessorOptions: { scss: { From 90c9c9c760d672d6c9417f56ee1ffb130c6b32f0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 Dec 2024 15:12:36 +0000 Subject: [PATCH 13/17] chore: update commit hash to 6458211bed379396e797e6da2944f6627a428c40 --- app/commit.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/commit.json b/app/commit.json index 1386a05..f60c148 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": 
"a9309161e95a8ed015f2f71b622fb63afdb74877", "version": "0.0.3" } +{ "commit": "6458211bed379396e797e6da2944f6627a428c40", "version": "0.0.3" } From 6975083fb9565b94b7e73064097f35c3f11527dc Mon Sep 17 00:00:00 2001 From: Anirban Kar Date: Wed, 18 Dec 2024 21:31:50 +0530 Subject: [PATCH 14/17] stopped apikeys set to envfile being exposed to UI --- app/commit.json | 2 +- vite.config.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/app/commit.json b/app/commit.json index 393abd3..f8c2532 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "e74d6cafb53f6eb2bb80c32014b27ac0aa56e7fe" } +{ "commit": "26a3bcf9b6401e606b5063830550cd6022f73899" } diff --git a/vite.config.ts b/vite.config.ts index d96f704..f292c7b 100644 --- a/vite.config.ts +++ b/vite.config.ts @@ -28,7 +28,7 @@ export default defineConfig((config) => { chrome129IssuePlugin(), config.mode === 'production' && optimizeCssModules({ apply: 'build' }), ], - envPrefix: ["VITE_", "OPENAI_LIKE_API_", "OLLAMA_API_BASE_URL", "LMSTUDIO_API_BASE_URL","TOGETHER_API_"], + envPrefix: ["VITE_", "OLLAMA_API_BASE_URL", "LMSTUDIO_API_BASE_URL","TOGETHER_API_BASE_URL"], css: { preprocessorOptions: { scss: { From d37c3736d5e73b0305f19d1bbc7c47a6dfbf7656 Mon Sep 17 00:00:00 2001 From: Anirban Kar Date: Wed, 18 Dec 2024 21:34:18 +0530 Subject: [PATCH 15/17] removed logs --- app/commit.json | 2 +- app/lib/.server/llm/model.ts | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/app/commit.json b/app/commit.json index f8c2532..cd2195b 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "26a3bcf9b6401e606b5063830550cd6022f73899" } +{ "commit": "6975083fb9565b94b7e73064097f35c3f11527dc" } diff --git a/app/lib/.server/llm/model.ts b/app/lib/.server/llm/model.ts index 1feb499..308e27d 100644 --- a/app/lib/.server/llm/model.ts +++ b/app/lib/.server/llm/model.ts @@ -84,8 +84,6 @@ export function getHuggingFaceModel(apiKey: OptionalApiKey, model: string) { 
} export function getOllamaModel(baseURL: string, model: string) { - console.log({ baseURL, model }); - const ollamaInstance = ollama(model, { numCtx: DEFAULT_NUM_CTX, }) as LanguageModelV1 & { config: any }; From b892d708ec5ec5396b810bad48cf3dedb2b05769 Mon Sep 17 00:00:00 2001 From: Anirban Kar Date: Wed, 18 Dec 2024 21:55:17 +0530 Subject: [PATCH 16/17] removed logs and openAI fix --- app/commit.json | 2 +- app/utils/constants.ts | 2 -- vite.config.ts | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/app/commit.json b/app/commit.json index cd2195b..432fff8 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "6975083fb9565b94b7e73064097f35c3f11527dc" } +{ "commit": "d37c3736d5e73b0305f19d1bbc7c47a6dfbf7656" } diff --git a/app/utils/constants.ts b/app/utils/constants.ts index c4eb0ae..b80b3c8 100644 --- a/app/utils/constants.ts +++ b/app/utils/constants.ts @@ -408,7 +408,6 @@ export async function getModelList(options: { }) { const { apiKeys, providerSettings, serverEnv } = options; - // console.log({ providerSettings, serverEnv,env:process.env }); MODEL_LIST = [ ...( await Promise.all( @@ -438,7 +437,6 @@ async function getTogetherModels( defaultBaseUrlKey: 'TOGETHER_API_BASE_URL', defaultApiTokenKey: 'TOGETHER_API_KEY', }); - console.log({ baseUrl, apiKey }); if (!baseUrl) { return []; diff --git a/vite.config.ts b/vite.config.ts index f292c7b..b2f795d 100644 --- a/vite.config.ts +++ b/vite.config.ts @@ -28,7 +28,7 @@ export default defineConfig((config) => { chrome129IssuePlugin(), config.mode === 'production' && optimizeCssModules({ apply: 'build' }), ], - envPrefix: ["VITE_", "OLLAMA_API_BASE_URL", "LMSTUDIO_API_BASE_URL","TOGETHER_API_BASE_URL"], + envPrefix: ["VITE_","OPENAI_LIKE_API_BASE_URL", "OLLAMA_API_BASE_URL", "LMSTUDIO_API_BASE_URL","TOGETHER_API_BASE_URL"], css: { preprocessorOptions: { scss: { From 296e5d9f779a818210caed974b75ca8e5644ea8c Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 18 
Dec 2024 16:28:44 +0000 Subject: [PATCH 17/17] chore: update commit hash to 50e677878446f622531123b19912f38e8246afbd --- app/commit.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/commit.json b/app/commit.json index f60c148..71f2fed 100644 --- a/app/commit.json +++ b/app/commit.json @@ -1 +1 @@ -{ "commit": "6458211bed379396e797e6da2944f6627a428c40", "version": "0.0.3" } +{ "commit": "50e677878446f622531123b19912f38e8246afbd", "version": "0.0.3" }