Merge remote-tracking branch 'upstream/main' into linting

Oliver Jägle 2024-11-22 12:25:39 +01:00
commit a203f4c34b
No known key found for this signature in database
GPG Key ID: 866E2BD1777473E9
5 changed files with 41 additions and 7 deletions

.env.example

@@ -65,4 +65,12 @@ LMSTUDIO_API_BASE_URL=
 XAI_API_KEY=
 
 # Include this environment variable if you want more logging for debugging locally
-VITE_LOG_LEVEL=debug
+VITE_LOG_LEVEL=debug
+
+# Example Context Values for qwen2.5-coder:32b
+#
+# DEFAULT_NUM_CTX=32768 # Consumes 36GB of VRAM
+# DEFAULT_NUM_CTX=24576 # Consumes 32GB of VRAM
+# DEFAULT_NUM_CTX=12288 # Consumes 26GB of VRAM
+# DEFAULT_NUM_CTX=6144 # Consumes 24GB of VRAM
+DEFAULT_NUM_CTX=

CONTRIBUTING.md

@@ -1,4 +1,7 @@
 # Contributing to Bolt.new Fork
 
+## DEFAULT_NUM_CTX
+The `DEFAULT_NUM_CTX` environment variable can be used to limit the maximum context window size (in tokens) used by the qwen2.5-coder model. For example, to limit the context to 24576 tokens (which consumes 32GB of VRAM), set `DEFAULT_NUM_CTX=24576` in your `.env.local` file.
+
 First off, thank you for considering contributing to Bolt.new! This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make Bolt.new a better tool for developers worldwide.
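
The variable is consumed in the model setup module (`model.ts`, shown later in this diff); a minimal sketch of that parsing logic, with the previously hard-coded 32768 as the fallback:

```typescript
// Read DEFAULT_NUM_CTX from the environment; fall back to the
// previous hard-coded context window of 32768 tokens when unset.
export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX
  ? parseInt(process.env.DEFAULT_NUM_CTX, 10)
  : 32768;
```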
@@ -81,6 +84,19 @@ ANTHROPIC_API_KEY=XXX
 ```bash
 VITE_LOG_LEVEL=debug
 ```
+
+- Optionally set context size:
+  ```bash
+  DEFAULT_NUM_CTX=32768
+  ```
+
+  Some example context values for the qwen2.5-coder:32b model, with approximate VRAM consumption, are listed below (see the `.env.local` example after this file's changes):
+
+  * DEFAULT_NUM_CTX=32768 - Consumes 36GB of VRAM
+  * DEFAULT_NUM_CTX=24576 - Consumes 32GB of VRAM
+  * DEFAULT_NUM_CTX=12288 - Consumes 26GB of VRAM
+  * DEFAULT_NUM_CTX=6144 - Consumes 24GB of VRAM
+
 **Important**: Never commit your `.env.local` file to version control. It's already included in `.gitignore`.
 
 ### 🚀 Running the Development Server
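
As a usage example, a `.env.local` for a machine with 32GB of VRAM might pick the matching value from the list above (file contents are illustrative):

```bash
# .env.local (never committed; covered by .gitignore)
# 24576 tokens corresponds to roughly 32GB of VRAM for qwen2.5-coder:32b
DEFAULT_NUM_CTX=24576
```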

Dockerfile

@@ -26,6 +26,7 @@ ARG OPEN_ROUTER_API_KEY
 ARG GOOGLE_GENERATIVE_AI_API_KEY
 ARG OLLAMA_API_BASE_URL
 ARG VITE_LOG_LEVEL=debug
+ARG DEFAULT_NUM_CTX
 
 ENV WRANGLER_SEND_METRICS=false \
     GROQ_API_KEY=${GROQ_API_KEY} \
@@ -35,7 +36,8 @@ ENV WRANGLER_SEND_METRICS=false \
     OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
     GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
     OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
-    VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
+    VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
 
 # Pre-configure wrangler to disable metrics
 RUN mkdir -p /root/.config/.wrangler && \
@@ -57,6 +59,7 @@ ARG OPEN_ROUTER_API_KEY
 ARG GOOGLE_GENERATIVE_AI_API_KEY
 ARG OLLAMA_API_BASE_URL
 ARG VITE_LOG_LEVEL=debug
+ARG DEFAULT_NUM_CTX
 
 ENV GROQ_API_KEY=${GROQ_API_KEY} \
     HuggingFace_API_KEY=${HuggingFace_API_KEY} \
@@ -65,7 +68,8 @@ ENV GROQ_API_KEY=${GROQ_API_KEY} \
     OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
     GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
     OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
-    VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
+    VITE_LOG_LEVEL=${VITE_LOG_LEVEL} \
+    DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX}
 
 RUN mkdir -p ${WORKDIR}/run
 CMD pnpm run dev --host
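
Because `DEFAULT_NUM_CTX` is declared as a build `ARG` and forwarded into `ENV` in both stages, it can also be overridden at image build time; a sketch (the image tag is hypothetical):

```bash
# Bake a smaller default context window into the image
docker build --build-arg DEFAULT_NUM_CTX=24576 -t bolt-local .
```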

app/lib/.server/llm/model.ts

@@ -11,7 +11,11 @@ import { createOpenRouter } from '@openrouter/ai-sdk-provider';
 import { createMistral } from '@ai-sdk/mistral';
 import { createCohere } from '@ai-sdk/cohere';
 
-export function getAnthropicModel(apiKey: string | undefined, model: string) {
+export const DEFAULT_NUM_CTX = process.env.DEFAULT_NUM_CTX ?
+  parseInt(process.env.DEFAULT_NUM_CTX, 10) :
+  32768;
+
+export function getAnthropicModel(apiKey: string, model: string) {
   const anthropic = createAnthropic({
     apiKey,
   });
@@ -68,7 +72,7 @@ export function getGroqModel(apiKey: string | undefined, model: string) {
   return openai(model);
 }
 
-export function getHuggingFaceModel(apiKey: string | undefined, model: string) {
+export function getHuggingFaceModel(apiKey: string, model: string) {
   const openai = createOpenAI({
     baseURL: 'https://api-inference.huggingface.co/v1/',
     apiKey,
@@ -78,8 +82,8 @@ export function getHuggingFaceModel(apiKey: string | undefined, model: string) {
 }
 
 export function getOllamaModel(baseURL: string, model: string) {
-  const ollamaInstance = ollama(model, {
-    numCtx: 32768,
+  let Ollama = ollama(model, {
+    numCtx: DEFAULT_NUM_CTX,
   });
 
   ollamaInstance.config.baseURL = `${baseURL}/api`;
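
Note that the hunk above mixes two names: the added lines declare `let Ollama`, while the retained context line configures `ollamaInstance`. A consistent version of the function might look like this sketch (the import and the `config` cast are assumptions, not part of this diff):

```typescript
import { ollama } from 'ollama-ai-provider'; // assumed provider import
import type { LanguageModelV1 } from 'ai';

export function getOllamaModel(baseURL: string, model: string) {
  // Single name throughout; the context size now comes from
  // DEFAULT_NUM_CTX instead of the previous hard-coded 32768.
  const ollamaInstance = ollama(model, {
    numCtx: DEFAULT_NUM_CTX,
  }) as LanguageModelV1 & { config: any };

  // Point the provider at the configured Ollama server.
  ollamaInstance.config.baseURL = `${baseURL}/api`;

  return ollamaInstance;
}
```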

docker-compose.yaml

@@ -21,6 +21,7 @@ services:
       - GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY}
       - OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
       - VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
+      - DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX:-32768}
       - RUNNING_IN_DOCKER=true
     extra_hosts:
       - "host.docker.internal:host-gateway"
@@ -48,6 +49,7 @@ services:
       - GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY}
       - OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
       - VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
+      - DEFAULT_NUM_CTX=${DEFAULT_NUM_CTX:-32768}
       - RUNNING_IN_DOCKER=true
     extra_hosts:
       - "host.docker.internal:host-gateway"