Merge pull request #1 from coleam00/main

Authored by masterdee121212 on 2024-11-07 17:29:59 +08:00, committed by GitHub
commit 9ca4438881
32 changed files with 2254 additions and 273 deletions

.dockerignore (new file, 26 lines)

@@ -0,0 +1,26 @@
# Ignore Git and GitHub files
.git
.github/
# Ignore Husky configuration files
.husky/
# Ignore documentation and metadata files
CONTRIBUTING.md
LICENSE
README.md
# Ignore environment examples and sensitive info
.env
*.local
*.example
# Ignore node modules, logs and cache files
**/*.log
**/node_modules
**/dist
**/build
**/.cache
logs
dist-ssr
.DS_Store

.env.example (new file, 47 lines)

@@ -0,0 +1,47 @@
# Rename this file to .env once you have filled in the below environment variables!
# Get your GROQ API Key here -
# https://console.groq.com/keys
# You only need this environment variable set if you want to use Groq models
GROQ_API_KEY=
# Get your OpenAI API Key by following these instructions -
# https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
# You only need this environment variable set if you want to use GPT models
OPENAI_API_KEY=
# Get your Anthropic API Key in your account settings -
# https://console.anthropic.com/settings/keys
# You only need this environment variable set if you want to use Claude models
ANTHROPIC_API_KEY=
# Get your OpenRouter API Key in your account settings -
# https://openrouter.ai/settings/keys
# You only need this environment variable set if you want to use OpenRouter models
OPEN_ROUTER_API_KEY=
# Get your Google Generative AI API Key by following these instructions -
# https://console.cloud.google.com/apis/credentials
# You only need this environment variable set if you want to use Google Generative AI models
GOOGLE_GENERATIVE_AI_API_KEY=
# You only need this environment variable set if you want to use Ollama models
# EXAMPLE http://localhost:11434
OLLAMA_API_BASE_URL=
# You only need this environment variable set if you want to use OpenAI Like models
OPENAI_LIKE_API_BASE_URL=
# You only need this environment variable set if you want to use DeepSeek models through their API
DEEPSEEK_API_KEY=
# Get your OpenAI Like API Key
OPENAI_LIKE_API_KEY=
# Get your Mistral API Key by following these instructions -
# https://console.mistral.ai/api-keys/
# You only need this environment variable set if you want to use Mistral models
MISTRAL_API_KEY=
# Include this environment variable if you want more logging for debugging locally
VITE_LOG_LEVEL=debug

@@ -25,53 +25,3 @@ jobs:
- name: Run tests
run: pnpm run test
build-and-deploy:
name: Build and Deploy
needs: test
runs-on: ubuntu-latest
environment: ${{ github.ref_name == 'master' && 'production' || 'staging' }}
if: ${{ github.ref != 'refs/heads/master' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup and Build
uses: ./.github/actions/setup-and-build
- name: Deploy to Cloudflare
id: deploy
uses: cloudflare/wrangler-action@v3
with:
wranglerVersion: '* -w'
packageManager: pnpm
apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }}
accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }}
command: pages deploy
- name: Deployment URL
env:
DEPLOYMENT_URL: ${{ steps.deploy.outputs.deployment-url }}
run: echo $DEPLOYMENT_URL
- name: Comment on Commit
if: github.event_name == 'push'
uses: hasura/comment-progress@v2.3.0
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
repository: ${{ github.repository }}
id: deploy-preview-commit
commit-sha: ${{ github.sha }}
message: '${{ github.sha }} has been deployed to ${{ steps.deploy.outputs.deployment-url }} :rocket:'
recreate: true
- name: Comment on PR
if: github.event_name == 'pull_request'
uses: hasura/comment-progress@v2.3.0
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
repository: ${{ github.repository }}
id: deploy-preview-pr
number: ${{ github.event.number }}
message: '#${{ github.event.number }} has been deployed to ${{ steps.deploy.outputs.deployment-url }} :rocket:'
recreate: true

.github/workflows/github-build-push.yml (new file, 39 lines)

@@ -0,0 +1,39 @@
name: Build and Push Container
on:
push:
branches:
- main
# paths:
# - 'Dockerfile'
workflow_dispatch:
jobs:
build-and-push:
runs-on: [ubuntu-latest]
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Login to GitHub Container Registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build and Push Containers
uses: docker/build-push-action@v2
with:
context: .
file: Dockerfile
platforms: linux/amd64,linux/arm64
push: true
tags: |
ghcr.io/${{ github.repository }}:latest
ghcr.io/${{ github.repository }}:${{ github.sha }}

.gitignore (7 lines changed)

@@ -12,7 +12,7 @@ dist-ssr
*.local
.vscode/*
!.vscode/launch.json
.vscode/launch.json
!.vscode/extensions.json
.idea
.DS_Store
@@ -24,7 +24,10 @@ dist-ssr
/.cache
/build
.env*
.env.local
.env
*.vars
.wrangler
_worker.bundle
Modelfile

@@ -1,95 +1,93 @@
# Contributing to Bolt.new Fork

First off, thank you for considering contributing to Bolt.new! This fork aims to expand the capabilities of the original project by integrating multiple LLM providers and enhancing functionality. Every contribution helps make Bolt.new a better tool for developers worldwide.

## 📋 Table of Contents
- [Code of Conduct](#code-of-conduct)
- [How Can I Contribute?](#how-can-i-contribute)
- [Pull Request Guidelines](#pull-request-guidelines)
- [Coding Standards](#coding-standards)
- [Development Setup](#development-setup)
- [Deployment with Docker](#docker-deployment-documentation)
- [Project Structure](#project-structure)

## Code of Conduct

This project and everyone participating in it is governed by our Code of Conduct. By participating, you are expected to uphold this code. Please report unacceptable behavior to the project maintainers.

## How Can I Contribute?

### 🐞 Reporting Bugs and Feature Requests
- Check the issue tracker to avoid duplicates
- Use the issue templates when available
- Include as much relevant information as possible
- For bugs, add steps to reproduce the issue

### 🔧 Code Contributions
1. Fork the repository
2. Create a new branch for your feature/fix
3. Write your code
4. Submit a pull request

### ✨ Becoming a Core Contributor
We're looking for dedicated contributors to help maintain and grow this project. If you're interested in becoming a core contributor, please fill out our [Contributor Application Form](https://forms.gle/TBSteXSDCtBDwr5m7).

## Pull Request Guidelines

### 📝 PR Checklist
- [ ] Branch from the main branch
- [ ] Update documentation if needed
- [ ] Manually verify all new functionality works as expected
- [ ] Keep PRs focused and atomic

### 👀 Review Process
1. Manually test the changes
2. At least one maintainer review required
3. Address all review comments
4. Maintain clean commit history

## Coding Standards

### 💻 General Guidelines
- Follow existing code style
- Comment complex logic
- Keep functions focused and small
- Use meaningful variable names

[![Bolt Open Source Codebase](./public/social_preview_index.jpg)](https://bolt.new)

> Welcome to the **Bolt** open-source codebase! This repo contains a simple example app using the core components from bolt.new to help you get started building **AI-powered software development tools** powered by StackBlitz's **WebContainer API**.

### Why Build with Bolt + WebContainer API

By building with the Bolt + WebContainer API you can create browser-based applications that let users **prompt, run, edit, and deploy** full-stack web apps directly in the browser, without the need for virtual machines. With WebContainer API, you can build apps that give AI direct access and full control over a **Node.js server**, **filesystem**, **package manager**, and **dev terminal** inside your user's browser tab. This powerful combination allows you to create a new class of development tools that support all major JavaScript libraries and Node packages right out of the box, all without remote environments or local installs.

### What's the Difference Between Bolt (This Repo) and [Bolt.new](https://bolt.new)?

- **Bolt.new**: This is the **commercial product** from StackBlitz—a hosted, browser-based AI development tool that enables users to prompt, run, edit, and deploy full-stack web applications directly in the browser. Built on top of the [Bolt open-source repo](https://github.com/stackblitz/bolt.new) and powered by the StackBlitz **WebContainer API**.

- **Bolt (This Repo)**: This open-source repository provides the core components used to make **Bolt.new**. This repo contains the UI interface for Bolt as well as the server components, built using [Remix Run](https://remix.run/). By leveraging this repo and StackBlitz's **WebContainer API**, you can create your own AI-powered development tools and full-stack applications that run entirely in the browser.

# Get Started Building with Bolt

Bolt combines the capabilities of AI with sandboxed development environments to create a collaborative experience where code can be developed by the assistant and the programmer together. Bolt combines [WebContainer API](https://webcontainers.io/api) with [Claude Sonnet 3.5](https://www.anthropic.com/news/claude-3-5-sonnet) using [Remix](https://remix.run/) and the [AI SDK](https://sdk.vercel.ai/).

### WebContainer API

Bolt uses [WebContainers](https://webcontainers.io/) to run generated code in the browser. WebContainers provide Bolt with a full-stack sandbox environment using the [WebContainer API](https://webcontainers.io/api). WebContainers run full-stack applications directly in the browser without the cost and security concerns of cloud-hosted AI agents. WebContainers are interactive and editable, which enables Bolt's AI to run code and understand any changes from the user.

The [WebContainer API](https://webcontainers.io) is free for personal and open source usage. If you're building an application for commercial usage, you can learn more about our [WebContainer API commercial usage pricing here](https://stackblitz.com/pricing#webcontainer-api).

### Remix App

Bolt is built with [Remix](https://remix.run/) and deployed using [CloudFlare Pages](https://pages.cloudflare.com/) and [CloudFlare Workers](https://workers.cloudflare.com/).

### AI SDK Integration

Bolt uses the [AI SDK](https://github.com/vercel/ai) to integrate with AI models. At this time, Bolt supports using Anthropic's Claude Sonnet 3.5. You can get an API key from the [Anthropic API Console](https://console.anthropic.com/) to use with Bolt. Take a look at how [Bolt uses the AI SDK](https://github.com/stackblitz/bolt.new/tree/main/app/lib/.server/llm)
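As a concrete reference, here is a minimal sketch of that integration, assuming the `ai` and `@ai-sdk/anthropic` packages and an `ANTHROPIC_API_KEY` environment variable. It mirrors the pattern used in `app/lib/.server/llm`, but it is not the repo's exact code:

```ts
import process from 'node:process';
import { streamText } from 'ai';
import { createAnthropic } from '@ai-sdk/anthropic';

// Create a provider instance bound to your API key (assumed to be set in the environment).
const anthropic = createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY });

// Stream a completion from Claude 3.5 Sonnet and print it as it arrives.
const result = await streamText({
  model: anthropic('claude-3-5-sonnet-20240620'),
  messages: [{ role: 'user', content: 'Scaffold a Vite + React todo app.' }],
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}
```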
## Prerequisites
Before you begin, ensure you have the following installed:
- Node.js (v20.15.1)
- pnpm (v9.4.0)
## Setup
1. Clone the repository (if you haven't already):
## Development Setup
### 🔄 Initial Setup
1. Clone the repository:
```bash
git clone https://github.com/stackblitz/bolt.new.git
git clone https://github.com/coleam00/bolt.new-any-llm.git
```
2. Install dependencies:
```bash
pnpm install
```
3. Create a `.env.local` file in the root directory and add your Anthropic API key:
3. Set up environment variables:
- Rename `.env.example` to `.env.local`
- Add your LLM API keys (only set the ones you plan to use):
```bash
GROQ_API_KEY=XXX
OPENAI_API_KEY=XXX
ANTHROPIC_API_KEY=XXX
...
```
Optionally, you can set the debug level:
- Optionally set debug level:
```bash
VITE_LOG_LEVEL=debug
```
**Important**: Never commit your `.env.local` file to version control. It's already included in .gitignore.
## Available Scripts
- `pnpm run dev`: Starts the development server.
- `pnpm run build`: Builds the project.
- `pnpm run start`: Runs the built application locally using Wrangler Pages. This script uses `bindings.sh` to set up necessary bindings so you don't have to duplicate environment variables.
- `pnpm run preview`: Builds the project and then starts it locally, useful for testing the production build. Note, HTTP streaming currently doesn't work as expected with `wrangler pages dev`.
- `pnpm test`: Runs the test suite using Vitest.
- `pnpm run typecheck`: Runs TypeScript type checking.
- `pnpm run typegen`: Generates TypeScript types using Wrangler.
- `pnpm run deploy`: Builds the project and deploys it to Cloudflare Pages.
## Development
To start the development server:
### 🚀 Running the Development Server
```bash
pnpm run dev
```
This will start the Remix Vite development server.
**Note**: You will need Google Chrome Canary to run this locally if you use Chrome! It's an easy install and a good browser for web development anyway.
## Testing
@@ -108,3 +106,96 @@ pnpm run deploy
```
Make sure you have the necessary permissions and Wrangler is correctly configured for your Cloudflare account.
# Docker Deployment Documentation
This guide outlines various methods for building and deploying the application using Docker.
## Build Methods
### 1. Using Helper Scripts
NPM scripts are provided for convenient building:
```bash
# Development build
npm run dockerbuild
# Production build
npm run dockerbuild:prod
```
### 2. Direct Docker Build Commands
You can use Docker's target feature to specify the build environment:
```bash
# Development build
docker build . --target bolt-ai-development
# Production build
docker build . --target bolt-ai-production
```
### 3. Docker Compose with Profiles
Use Docker Compose profiles to manage different environments:
```bash
# Development environment
docker-compose --profile development up
# Production environment
docker-compose --profile production up
```
## Running the Application
After building using any of the methods above, run the container with:
```bash
# Development
docker run -p 5173:5173 --env-file .env.local bolt-ai:development
# Production
docker run -p 5173:5173 --env-file .env.local bolt-ai:production
```
## Deployment with Coolify
[Coolify](https://github.com/coollabsio/coolify) provides a straightforward deployment process:
1. Import your Git repository as a new project
2. Select your target environment (development/production)
3. Choose "Docker Compose" as the Build Pack
4. Configure deployment domains
5. Set the custom start command:
```bash
docker compose --profile production up
```
6. Configure environment variables
- Add necessary AI API keys
- Adjust other environment variables as needed
7. Deploy the application
## VS Code Integration
The `docker-compose.yaml` configuration is compatible with VS Code dev containers:
1. Open the command palette in VS Code
2. Select the dev container configuration
3. Choose the "development" profile from the context menu
## Environment Files
Ensure you have the appropriate `.env.local` file configured before running the containers. This file should contain:
- API keys
- Environment-specific configurations
- Other required environment variables
## Notes
- Port 5173 is exposed and mapped for both development and production environments
- Environment variables are loaded from `.env.local`
- Different profiles (development/production) can be used for different deployment scenarios
- The configuration supports both local development and production deployment

Dockerfile (new file, 67 lines)

@@ -0,0 +1,67 @@
ARG BASE=node:20.18.0
FROM ${BASE} AS base
WORKDIR /app
# Install dependencies (this step is cached as long as the dependencies don't change)
COPY package.json pnpm-lock.yaml ./
RUN corepack enable pnpm && pnpm install
# Copy the rest of your app's source code
COPY . .
# Expose the port the app runs on
EXPOSE 5173
# Production image
FROM base AS bolt-ai-production
# Define environment variables with default values or let them be overridden
ARG GROQ_API_KEY
ARG OPENAI_API_KEY
ARG ANTHROPIC_API_KEY
ARG OPEN_ROUTER_API_KEY
ARG GOOGLE_GENERATIVE_AI_API_KEY
ARG OLLAMA_API_BASE_URL
ARG VITE_LOG_LEVEL=debug
ENV WRANGLER_SEND_METRICS=false \
GROQ_API_KEY=${GROQ_API_KEY} \
OPENAI_API_KEY=${OPENAI_API_KEY} \
ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \
OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
# Pre-configure wrangler to disable metrics
RUN mkdir -p /root/.config/.wrangler && \
echo '{"enabled":false}' > /root/.config/.wrangler/metrics.json
RUN npm run build
CMD [ "pnpm", "run", "dockerstart"]
# Development image
FROM base AS bolt-ai-development
# Define the same environment variables for development
ARG GROQ_API_KEY
ARG OPENAI_API_KEY
ARG ANTHROPIC_API_KEY
ARG OPEN_ROUTER_API_KEY
ARG GOOGLE_GENERATIVE_AI_API_KEY
ARG OLLAMA_API_BASE_URL
ARG VITE_LOG_LEVEL=debug
ENV GROQ_API_KEY=${GROQ_API_KEY} \
OPENAI_API_KEY=${OPENAI_API_KEY} \
ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \
OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
RUN mkdir -p /app/run
CMD pnpm run dev --host

README.md (250 lines changed)

@@ -1,54 +1,250 @@
[![Bolt.new: AI-Powered Full-Stack Web Development in the Browser](./public/social_preview_index.jpg)](https://bolt.new)
# Bolt.new Fork by Cole Medin
This fork of Bolt.new allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
# Requested Additions to this Fork - Feel Free to Contribute!!
- ✅ OpenRouter Integration (@coleam00)
- ✅ Gemini Integration (@jonathands)
- ✅ Autogenerate Ollama models from what is downloaded (@yunatamos)
- ✅ Filter models by provider (@jasonm23)
- ✅ Download project as ZIP (@fabwaseem)
- ✅ Improvements to the main Bolt.new prompt in `app/lib/.server/llm/prompts.ts` (@kofi-bhr)
- ✅ DeepSeek API Integration (@zenith110)
- ✅ Mistral API Integration (@ArulGandhi)
- ✅ "Open AI Like" API Integration (@ZerxZ)
- ✅ Ability to sync files (one way sync) to local folder (@muzafferkadir)
- ✅ Containerize the application with Docker for easy installation (@aaronbolton)
- ✅ Publish projects directly to GitHub (@goncaloalves)
- ⬜ Prevent Bolt from rewriting files as often (Done but need to review PR still)
- ⬜ **HIGH PRIORITY** - Better prompting for smaller LLMs (code window sometimes doesn't start)
- ⬜ **HIGH PRIORITY** - Load local projects into the app
- ⬜ **HIGH PRIORITY** - Attach images to prompts
- ⬜ **HIGH PRIORITY** - Run agents in the backend as opposed to a single model call
- ⬜ LM Studio Integration
- ⬜ Together Integration
- ⬜ Azure Open AI API Integration
- ⬜ HuggingFace Integration
- ⬜ Perplexity Integration
- ⬜ Vertex AI Integration
- ⬜ Cohere Integration
- ⬜ Deploy directly to Vercel/Netlify/other similar platforms
- ⬜ Ability to revert code to earlier version
- ⬜ Prompt caching
- ⬜ Better prompt enhancing
- ⬜ Ability to enter API keys in the UI
- ⬜ Have LLM plan the project in a MD file for better results/transparency
- ⬜ VSCode Integration with git-like confirmations
- ⬜ Upload documents for knowledge - UI design templates, a code base to reference coding style, etc.
- ⬜ Voice prompting
# Bolt.new: AI-Powered Full-Stack Web Development in the Browser
Bolt.new is an AI-powered web development agent that allows you to prompt, run, edit, and deploy full-stack applications directly from your browser—no local setup required. If you're here to build your own AI-powered web dev agent using the Bolt open source codebase, [click here to get started!](./CONTRIBUTING.md)
## What Makes Bolt.new Different
Claude, v0, etc. are incredible, but you can't install packages, run backends, or edit code. That's where Bolt.new stands out:
- **Full-Stack in the Browser**: Bolt.new integrates cutting-edge AI models with an in-browser development environment powered by **StackBlitz's WebContainers**. This allows you to:
- Install and run npm tools and libraries (like Vite, Next.js, and more)
- Run Node.js servers
- Interact with third-party APIs
- Deploy to production from chat
- Share your work via a URL
- **AI with Environment Control**: Unlike traditional dev environments where the AI can only assist in code generation, Bolt.new gives AI models **complete control** over the entire environment including the filesystem, node server, package manager, terminal, and browser console. This empowers AI agents to handle the whole app lifecycle—from creation to deployment.
Whether you're an experienced developer, a PM, or a designer, Bolt.new allows you to easily build production-grade full-stack applications.
For developers interested in building their own AI-powered development tools with WebContainers, check out the open-source Bolt codebase in this repo!
## Setup
Many of you may be new to installing software from GitHub. If you have any installation troubles, submit an "issue" using the links above, or feel free to enhance this documentation by forking the repo, editing the instructions, and opening a pull request.
1. Install Git from https://git-scm.com/downloads
2. Install Node.js from https://nodejs.org/en/download/
Pay attention to the installer notes after completion.
On all operating systems, the path to Node.js should automatically be added to your system path, but you can check to be sure. On Windows, search for "edit the system environment variables", select "Environment Variables..." in System Properties, and check for a path to Node in your "Path" system variable. On a Mac or Linux machine, check whether /usr/local/bin is in your $PATH. To determine whether /usr/local/bin is included in $PATH, open your Terminal and run:
```
echo $PATH
```
If you see /usr/local/bin in the output then you're good to go.
3. Clone the repository (if you haven't already) by opening a Terminal window (or CMD with admin permissions) and then typing in this:
```
git clone https://github.com/coleam00/bolt.new-any-llm.git
```
4. Rename .env.example to .env and add your LLM API keys. You will find this file on a Mac at "[your name]/bolt.new-any-llm/.env.example". For Windows and Linux the path will be similar.
![image](https://github.com/user-attachments/assets/7e6a532c-2268-401f-8310-e8d20c731328)
If you can't see the file indicated above, it's likely you can't view hidden files. On Mac, open a Terminal window and enter the command below. On Windows, you will see the hidden files option in File Explorer Settings. A quick Google search will help you if you are stuck here.
```
defaults write com.apple.finder AppleShowAllFiles YES
```
**NOTE**: You only have to set the keys for the providers you want to use, and Ollama doesn't need an API key because it runs locally on your computer:
Get your GROQ API Key here: https://console.groq.com/keys
Get your OpenAI API Key by following these instructions: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
Get your Anthropic API Key in your account settings: https://console.anthropic.com/settings/keys
```
GROQ_API_KEY=XXX
OPENAI_API_KEY=XXX
ANTHROPIC_API_KEY=XXX
```
Optionally, you can set the debug level:
```
VITE_LOG_LEVEL=debug
```
**Important**: Never commit your `.env` file to version control. It's already included in .gitignore.
## Run with Docker
Prerequisites:
Git and Node.js as mentioned above, as well as Docker: https://www.docker.com/
### 1a. Using Helper Scripts
NPM scripts are provided for convenient building:
```bash
# Development build
npm run dockerbuild
# Production build
npm run dockerbuild:prod
```
### 1b. Direct Docker Build Commands (alternative to using NPM scripts)
You can use Docker's target feature to specify the build environment instead of using NPM scripts if you wish:
```bash
# Development build
docker build . --target bolt-ai-development
# Production build
docker build . --target bolt-ai-production
```
### 2. Docker Compose with Profiles to Run the Container
Use Docker Compose profiles to manage different environments:
```bash
# Development environment
docker-compose --profile development up
# Production environment
docker-compose --profile production up
```
When you run the Docker Compose command with the development profile, any changes you make to the code on your machine will automatically be reflected in the site running in the container (i.e. hot reloading still applies!).
## Run Without Docker
1. Install dependencies using Terminal (or CMD in Windows with admin permissions):
```
pnpm install
```
If you get an error saying "command not found: pnpm" or similar, pnpm isn't installed. You can install it with:
```
sudo npm install -g pnpm
```
2. Start the application with the command:
```bash
pnpm run dev
```
## Super Important Note on Running Ollama Models
Ollama models default to a context window of only 2048 tokens, even for large models that can easily handle far more. This is not a large enough window to handle the Bolt.new/oTToDev prompt! You have to create a version of any model you want to use where you specify a larger context window. Luckily it's super easy to do that.
All you have to do is:
- Create a file called "Modelfile" (no file extension) anywhere on your computer
- Put in the two lines:
```
FROM [Ollama model ID such as qwen2.5-coder:7b]
PARAMETER num_ctx 32768
```
- Run the command:
```
ollama create -f Modelfile [your new model ID, can be whatever you want (example: qwen2.5-coder-extra-ctx:7b)]
```
Now you have a new Ollama model that isn't limited to the small default context window. You'll see this new model in the list of Ollama models along with all the others you pulled!
## Adding New LLMs
To make new LLMs available to use in this version of Bolt.new, head on over to `app/utils/constants.ts` and find the constant MODEL_LIST. Each element in this array is an object that has the model ID for the name (get this from the provider's API documentation), a label for the frontend model dropdown, and the provider.
By default, Anthropic, OpenAI, Groq, and Ollama are implemented as providers, but the YouTube video for this repo covers how to extend this to work with more providers if you wish!
When you add a new model to the MODEL_LIST array, it will immediately be available to use when you run the app locally or reload it. For Ollama models, make sure you have the model installed already before trying to use it here!
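For example, a hypothetical entry looks like the sketch below; the `name` and `label` values are placeholders, so substitute the real model ID from your provider's documentation:

```ts
// app/utils/constants.ts - appending a hypothetical model to the static list.
const staticModels: ModelInfo[] = [
  // ...existing entries...
  { name: 'your-model-id', label: 'Your Model Label (Provider)', provider: 'OpenAI' },
];
```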
## Available Scripts
- `pnpm run dev`: Starts the development server.
- `pnpm run build`: Builds the project.
- `pnpm run start`: Runs the built application locally using Wrangler Pages. This script uses `bindings.sh` to set up necessary bindings so you don't have to duplicate environment variables.
- `pnpm run preview`: Builds the project and then starts it locally, useful for testing the production build. Note, HTTP streaming currently doesn't work as expected with `wrangler pages dev`.
- `pnpm test`: Runs the test suite using Vitest.
- `pnpm run typecheck`: Runs TypeScript type checking.
- `pnpm run typegen`: Generates TypeScript types using Wrangler.
- `pnpm run deploy`: Builds the project and deploys it to Cloudflare Pages.
## Development
To start the development server:
```bash
pnpm run dev
```
This will start the Remix Vite development server. You will need Google Chrome Canary to run this locally if you use Chrome! It's an easy install and a good browser for web development anyway.
## Tips and Tricks
Here are some tips to get the most out of Bolt.new:
- **Be specific about your stack**: If you want to use specific frameworks or libraries (like Astro, Tailwind, ShadCN, or any other popular JavaScript framework), mention them in your initial prompt to ensure Bolt scaffolds the project accordingly.
- **Use the enhance prompt icon**: Before sending your prompt, try clicking the 'enhance' icon to have the AI model help you refine your prompt, then edit the results before submitting.
- **Scaffold the basics first, then add features**: Make sure the basic structure of your application is in place before diving into more advanced functionality. This helps Bolt understand the foundation of your project and ensure everything is wired up right before building out more advanced functionality.
- **Batch simple instructions**: Save time by combining simple instructions into one message. For example, you can ask Bolt to change the color scheme, add mobile responsiveness, and restart the dev server, all in one go, saving you time and reducing API credit consumption significantly.
## FAQs
**Where do I sign up for a paid plan?**
Bolt.new is free to get started. If you need more AI tokens or want private projects, you can purchase a paid subscription in your [Bolt.new](https://bolt.new) settings, in the lower left hand corner of the application.
**What happens if I hit the free usage limit?**
Once your free daily token limit is reached, AI interactions are paused until the next day or until you upgrade your plan.
**Is Bolt in beta?**
Yes, Bolt.new is in beta, and we are actively improving it based on feedback.
**How can I report Bolt.new issues?**
Check out the [Issues section](https://github.com/stackblitz/bolt.new/issues) to report an issue or request a new feature. Please use the search feature to check if someone else has already submitted the same issue/request.
**What frameworks/libraries currently work on Bolt?**
Bolt.new supports most popular JavaScript frameworks and libraries. If it runs on StackBlitz, it will run on Bolt.new as well.
**How can I make sure my framework/project works well in Bolt?**
We are excited to work with the JavaScript ecosystem to improve functionality in Bolt. Reach out to us via [hello@stackblitz.com](mailto:hello@stackblitz.com) to discuss how we can partner!

@@ -1,3 +1,5 @@
// @ts-nocheck
// Preventing TS checks with files presented in the video for a better presentation.
import type { Message } from 'ai';
import React, { type RefCallback } from 'react';
import { ClientOnly } from 'remix-utils/client-only';
@@ -5,11 +7,65 @@ import { Menu } from '~/components/sidebar/Menu.client';
import { IconButton } from '~/components/ui/IconButton';
import { Workbench } from '~/components/workbench/Workbench.client';
import { classNames } from '~/utils/classNames';
import { MODEL_LIST, DEFAULT_PROVIDER } from '~/utils/constants';
import { Messages } from './Messages.client';
import { SendButton } from './SendButton.client';
import { useState } from 'react';
import styles from './BaseChat.module.scss';
const EXAMPLE_PROMPTS = [
{ text: 'Build a todo app in React using Tailwind' },
{ text: 'Build a simple blog using Astro' },
{ text: 'Create a cookie consent form using Material UI' },
{ text: 'Make a space invaders game' },
{ text: 'How do I center a div?' },
];
const providerList = [...new Set(MODEL_LIST.map((model) => model.provider))];
const ModelSelector = ({ model, setModel, modelList, providerList }) => {
const [provider, setProvider] = useState(DEFAULT_PROVIDER);
return (
<div className="mb-2">
<select
value={provider}
onChange={(e) => {
setProvider(e.target.value);
const firstModel = [...modelList].find(m => m.provider == e.target.value);
setModel(firstModel ? firstModel.name : '');
}}
className="w-full p-2 rounded-lg border border-bolt-elements-borderColor bg-bolt-elements-prompt-background text-bolt-elements-textPrimary focus:outline-none"
>
{providerList.map((provider) => (
<option key={provider} value={provider}>
{provider}
</option>
))}
<option key="Ollama" value="Ollama">
Ollama
</option>
<option key="OpenAILike" value="OpenAILike">
OpenAILike
</option>
</select>
<select
value={model}
onChange={(e) => setModel(e.target.value)}
className="w-full p-2 rounded-lg border border-bolt-elements-borderColor bg-bolt-elements-prompt-background text-bolt-elements-textPrimary focus:outline-none"
>
{[...modelList].filter(e => e.provider == provider && e.name).map((modelOption) => (
<option key={modelOption.name} value={modelOption.name}>
{modelOption.label}
</option>
))}
</select>
</div>
);
};
const TEXTAREA_MIN_HEIGHT = 76;
interface BaseChatProps {
textareaRef?: React.RefObject<HTMLTextAreaElement> | undefined;
messageRef?: RefCallback<HTMLDivElement> | undefined;
@@ -21,22 +77,14 @@ interface BaseChatProps {
enhancingPrompt?: boolean;
promptEnhanced?: boolean;
input?: string;
model: string;
setModel: (model: string) => void;
handleStop?: () => void;
sendMessage?: (event: React.UIEvent, messageInput?: string) => void;
handleInputChange?: (event: React.ChangeEvent<HTMLTextAreaElement>) => void;
enhancePrompt?: () => void;
}
const EXAMPLE_PROMPTS = [
{ text: 'Build a todo app in React using Tailwind' },
{ text: 'Build a simple blog using Astro' },
{ text: 'Create a cookie consent form using Material UI' },
{ text: 'Make a space invaders game' },
{ text: 'How do I center a div?' },
];
const TEXTAREA_MIN_HEIGHT = 76;
export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
(
{
@@ -50,6 +98,8 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
promptEnhanced = false,
messages,
input = '',
model,
setModel,
sendMessage,
handleInputChange,
enhancePrompt,
@@ -69,7 +119,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
data-chat-visible={showChat}
>
<ClientOnly>{() => <Menu />}</ClientOnly>
<div ref={scrollRef} className="flex overflow-scroll w-full h-full">
<div ref={scrollRef} className="flex overflow-y-auto w-full h-full">
<div className={classNames(styles.Chat, 'flex flex-col flex-grow min-w-[var(--chat-min-width)] h-full')}>
{!chatStarted && (
<div id="intro" className="mt-[26vh] max-w-chat mx-auto">
@@ -103,6 +153,12 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
'sticky bottom-0': chatStarted,
})}
>
<ModelSelector
model={model}
setModel={setModel}
modelList={MODEL_LIST}
providerList={providerList}
/>
<div
className={classNames(
'shadow-sm border border-bolt-elements-borderColor bg-bolt-elements-prompt-background backdrop-filter backdrop-blur-[8px] rounded-lg overflow-hidden',

@@ -1,3 +1,5 @@
// @ts-nocheck
// Preventing TS checks with files presented in the video for a better presentation.
import { useStore } from '@nanostores/react';
import type { Message } from 'ai';
import { useChat } from 'ai/react';
@@ -9,6 +11,7 @@ import { useChatHistory } from '~/lib/persistence';
import { chatStore } from '~/lib/stores/chat';
import { workbenchStore } from '~/lib/stores/workbench';
import { fileModificationsToHTML } from '~/utils/diff';
import { DEFAULT_MODEL } from '~/utils/constants';
import { cubicEasingFn } from '~/utils/easings';
import { createScopedLogger, renderLogger } from '~/utils/logger';
import { BaseChat } from './BaseChat';
@@ -70,6 +73,7 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
const textareaRef = useRef<HTMLTextAreaElement>(null);
const [chatStarted, setChatStarted] = useState(initialMessages.length > 0);
const [model, setModel] = useState(DEFAULT_MODEL);
const { showChat } = useStore(chatStore);
@@ -178,7 +182,7 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
* manually reset the input and we'd have to manually pass in file attachments. However, those
* aren't relevant here.
*/
append({ role: 'user', content: `${diff}\n\n${_input}` });
append({ role: 'user', content: `[Model: ${model}]\n\n${diff}\n\n${_input}` });
/**
* After sending a new message we reset all modifications since the model
@@ -186,7 +190,7 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
*/
workbenchStore.resetAllFileModifications();
} else {
append({ role: 'user', content: _input });
append({ role: 'user', content: `[Model: ${model}]\n\n${_input}` });
}
setInput('');
@@ -209,6 +213,8 @@ export const ChatImpl = memo(({ initialMessages, storeMessageHistory }: ChatProp
enhancingPrompt={enhancingPrompt}
promptEnhanced={promptEnhanced}
sendMessage={sendMessage}
model={model}
setModel={setModel}
messageRef={messageRef}
scrollRef={scrollRef}
handleInputChange={handleInputChange}

@@ -1,4 +1,7 @@
// @ts-nocheck
// Preventing TS checks with files presented in the video for a better presentation.
import { modificationsRegex } from '~/utils/diff';
import { MODEL_REGEX } from '~/utils/constants';
import { Markdown } from './Markdown';
interface UserMessageProps {
@@ -14,5 +17,5 @@ export function UserMessage({ content }: UserMessageProps) {
}
function sanitizeUserMessage(content: string) {
return content.replace(modificationsRegex, '').trim();
return content.replace(modificationsRegex, '').replace(MODEL_REGEX, '').trim();
}
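To illustrate the effect with a hypothetical input (not code from this commit): `MODEL_REGEX` matches the `[Model: ...]` prefix that the chat component prepends, so it is stripped before the message is rendered:

```ts
// MODEL_REGEX as defined in app/utils/constants.ts.
const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/;

const raw = '[Model: gpt-4o]\n\nBuild a todo app in React using Tailwind';
const clean = raw.replace(MODEL_REGEX, '').trim();
console.log(clean); // "Build a todo app in React using Tailwind"
```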

@@ -164,13 +164,6 @@ export function Menu() {
</DialogRoot>
</div>
<div className="flex items-center border-t border-bolt-elements-borderColor p-4">
<a href="/logout">
<IconButton className="p-1.5 gap-1.5">
<>
Logout <span className="i-ph:sign-out text-lg" />
</>
</IconButton>
</a>
<ThemeSwitch className="ml-auto" />
</div>
</div>

@@ -1,7 +1,7 @@
import { useStore } from '@nanostores/react';
import { motion, type HTMLMotionProps, type Variants } from 'framer-motion';
import { computed } from 'nanostores';
import { memo, useCallback, useEffect } from 'react';
import { memo, useCallback, useEffect, useState } from 'react';
import { toast } from 'react-toastify';
import {
type OnChangeCallback as OnEditorChange,
@@ -55,6 +55,8 @@ const workbenchVariants = {
export const Workbench = memo(({ chatStarted, isStreaming }: WorkspaceProps) => {
renderLogger.trace('Workbench');
const [isSyncing, setIsSyncing] = useState(false);
const hasPreview = useStore(computed(workbenchStore.previews, (previews) => previews.length > 0));
const showWorkbench = useStore(workbenchStore.showWorkbench);
const selectedFile = useStore(workbenchStore.selectedFile);
@@ -99,6 +101,21 @@ export const Workbench = memo(({ chatStarted, isStreaming }: WorkspaceProps) =>
workbenchStore.resetCurrentDocument();
}, []);
const handleSyncFiles = useCallback(async () => {
setIsSyncing(true);
try {
const directoryHandle = await window.showDirectoryPicker();
await workbenchStore.syncFiles(directoryHandle);
toast.success('Files synced successfully');
} catch (error) {
console.error('Error syncing files:', error);
toast.error('Failed to sync files');
} finally {
setIsSyncing(false);
}
}, []);
return (
chatStarted && (
<motion.div
@@ -122,15 +139,55 @@ export const Workbench = memo(({ chatStarted, isStreaming }: WorkspaceProps) =>
<Slider selected={selectedView} options={sliderOptions} setSelected={setSelectedView} />
<div className="ml-auto" />
{selectedView === 'code' && (
<PanelHeaderButton
className="mr-1 text-sm"
onClick={() => {
workbenchStore.toggleTerminal(!workbenchStore.showTerminal.get());
}}
>
<div className="i-ph:terminal" />
Toggle Terminal
</PanelHeaderButton>
<>
<PanelHeaderButton
className="mr-1 text-sm"
onClick={() => {
workbenchStore.downloadZip();
}}
>
<div className="i-ph:code" />
Download Code
</PanelHeaderButton>
<PanelHeaderButton className="mr-1 text-sm" onClick={handleSyncFiles} disabled={isSyncing}>
{isSyncing ? <div className="i-ph:spinner" /> : <div className="i-ph:cloud-arrow-down" />}
{isSyncing ? 'Syncing...' : 'Sync Files'}
</PanelHeaderButton>
<PanelHeaderButton
className="mr-1 text-sm"
onClick={() => {
workbenchStore.toggleTerminal(!workbenchStore.showTerminal.get());
}}
>
<div className="i-ph:terminal" />
Toggle Terminal
</PanelHeaderButton>
<PanelHeaderButton
className="mr-1 text-sm"
onClick={() => {
const repoName = prompt("Please enter a name for your new GitHub repository:", "bolt-generated-project");
if (!repoName) {
alert("Repository name is required. Push to GitHub cancelled.");
return;
}
const githubUsername = prompt("Please enter your GitHub username:");
if (!githubUsername) {
alert("GitHub username is required. Push to GitHub cancelled.");
return;
}
const githubToken = prompt("Please enter your GitHub personal access token:");
if (!githubToken) {
alert("GitHub token is required. Push to GitHub cancelled.");
return;
}
workbenchStore.pushToGitHub(repoName, githubUsername, githubToken);
}}
>
<div className="i-ph:github-logo" />
Push to GitHub
</PanelHeaderButton>
</>
)}
<IconButton
icon="i-ph:x-circle"
@@ -173,7 +230,6 @@ export const Workbench = memo(({ chatStarted, isStreaming }: WorkspaceProps) =>
)
);
});
interface ViewProps extends HTMLMotionProps<'div'> {
children: JSX.Element;
}

@@ -5,6 +5,7 @@ import { renderToReadableStream } from 'react-dom/server';
import { renderHeadToString } from 'remix-island';
import { Head } from './root';
import { themeStore } from '~/lib/stores/theme';
import { initializeModelList } from '~/utils/constants';
export default async function handleRequest(
request: Request,
@@ -13,6 +14,8 @@ export default async function handleRequest(
remixContext: EntryContext,
_loadContext: AppLoadContext,
) {
await initializeModelList();
const readable = await renderToReadableStream(<RemixServer context={remixContext} url={request.url} />, {
signal: request.signal,
onError(error: unknown) {

@@ -1,9 +1,46 @@
// @ts-nocheck
// Preventing TS checks with files presented in the video for a better presentation.
import { env } from 'node:process';
export function getAPIKey(cloudflareEnv: Env) {
export function getAPIKey(cloudflareEnv: Env, provider: string) {
/**
* The `cloudflareEnv` is only used when deployed or when previewing locally.
* In development the environment variables are available through `env`.
*/
return env.ANTHROPIC_API_KEY || cloudflareEnv.ANTHROPIC_API_KEY;
switch (provider) {
case 'Anthropic':
return env.ANTHROPIC_API_KEY || cloudflareEnv.ANTHROPIC_API_KEY;
case 'OpenAI':
return env.OPENAI_API_KEY || cloudflareEnv.OPENAI_API_KEY;
case 'Google':
return env.GOOGLE_GENERATIVE_AI_API_KEY || cloudflareEnv.GOOGLE_GENERATIVE_AI_API_KEY;
case 'Groq':
return env.GROQ_API_KEY || cloudflareEnv.GROQ_API_KEY;
case 'OpenRouter':
return env.OPEN_ROUTER_API_KEY || cloudflareEnv.OPEN_ROUTER_API_KEY;
case 'Deepseek':
return env.DEEPSEEK_API_KEY || cloudflareEnv.DEEPSEEK_API_KEY;
case 'Mistral':
return env.MISTRAL_API_KEY || cloudflareEnv.MISTRAL_API_KEY;
case "OpenAILike":
return env.OPENAI_LIKE_API_KEY || cloudflareEnv.OPENAI_LIKE_API_KEY;
default:
return "";
}
}
export function getBaseURL(cloudflareEnv: Env, provider: string) {
switch (provider) {
case 'OpenAILike':
return env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL;
case 'Ollama':
let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || "http://localhost:11434";
if (env.RUNNING_IN_DOCKER === 'true') {
baseUrl = baseUrl.replace("localhost", "host.docker.internal");
}
return baseUrl;
default:
return "";
}
}

@@ -1,5 +1,5 @@
// see https://docs.anthropic.com/en/docs/about-claude/models
export const MAX_TOKENS = 8192;
export const MAX_TOKENS = 8000;
// limits the number of model responses that can be returned in a single request
export const MAX_RESPONSE_SEGMENTS = 2;

@@ -1,9 +1,107 @@
// @ts-nocheck
// Preventing TS checks with files presented in the video for a better presentation.
import { getAPIKey, getBaseURL } from '~/lib/.server/llm/api-key';
import { createAnthropic } from '@ai-sdk/anthropic';
import { createOpenAI } from '@ai-sdk/openai';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { ollama } from 'ollama-ai-provider';
import { createOpenRouter } from "@openrouter/ai-sdk-provider";
import { mistral } from '@ai-sdk/mistral';
import { createMistral } from '@ai-sdk/mistral';
export function getAnthropicModel(apiKey: string) {
export function getAnthropicModel(apiKey: string, model: string) {
const anthropic = createAnthropic({
apiKey,
});
return anthropic('claude-3-5-sonnet-20240620');
return anthropic(model);
}
export function getOpenAILikeModel(baseURL: string, apiKey: string, model: string) {
const openai = createOpenAI({
baseURL,
apiKey,
});
return openai(model);
}
export function getOpenAIModel(apiKey: string, model: string) {
const openai = createOpenAI({
apiKey,
});
return openai(model);
}
export function getMistralModel(apiKey: string, model: string) {
const mistral = createMistral({
apiKey
});
return mistral(model);
}
export function getGoogleModel(apiKey: string, model: string) {
const google = createGoogleGenerativeAI({
apiKey,
});
return google(model);
}
export function getGroqModel(apiKey: string, model: string) {
const openai = createOpenAI({
baseURL: 'https://api.groq.com/openai/v1',
apiKey,
});
return openai(model);
}
export function getOllamaModel(baseURL: string, model: string) {
let Ollama = ollama(model);
Ollama.config.baseURL = `${baseURL}/api`;
return Ollama;
}
export function getDeepseekModel(apiKey: string, model: string){
const openai = createOpenAI({
baseURL: 'https://api.deepseek.com/beta',
apiKey,
});
return openai(model);
}
export function getOpenRouterModel(apiKey: string, model: string) {
const openRouter = createOpenRouter({
apiKey
});
return openRouter.chat(model);
}
export function getModel(provider: string, model: string, env: Env) {
const apiKey = getAPIKey(env, provider);
const baseURL = getBaseURL(env, provider);
switch (provider) {
case 'Anthropic':
return getAnthropicModel(apiKey, model);
case 'OpenAI':
return getOpenAIModel(apiKey, model);
case 'Groq':
return getGroqModel(apiKey, model);
case 'OpenRouter':
return getOpenRouterModel(apiKey, model);
case 'Google':
return getGoogleModel(apiKey, model);
case 'OpenAILike':
return getOpenAILikeModel(baseURL, apiKey, model);
case 'Deepseek':
return getDeepseekModel(apiKey, model);
case 'Mistral':
return getMistralModel(apiKey, model);
default:
return getOllamaModel(baseURL, model);
}
}

@@ -29,7 +29,32 @@ You are Bolt, an expert AI assistant and exceptional senior software developer w
IMPORTANT: When choosing databases or npm packages, prefer options that don't rely on native binaries. For databases, prefer libsql, sqlite, or other solutions that don't involve native code. WebContainer CANNOT execute arbitrary native binaries.
Available shell commands: cat, chmod, cp, echo, hostname, kill, ln, ls, mkdir, mv, ps, pwd, rm, rmdir, xxd, alias, cd, clear, curl, env, false, getconf, head, sort, tail, touch, true, uptime, which, code, jq, loadenv, node, python3, wasm, xdg-open, command, exit, export, source
Available shell commands:
File Operations:
- cat: Display file contents
- cp: Copy files/directories
- ls: List directory contents
- mkdir: Create directory
- mv: Move/rename files
- rm: Remove files
- rmdir: Remove empty directories
- touch: Create empty file/update timestamp
System Information:
- hostname: Show system name
- ps: Display running processes
- pwd: Print working directory
- uptime: Show system uptime
- env: Environment variables
Development Tools:
- node: Execute Node.js code
- python3: Run Python scripts
- code: VSCode operations
- jq: Process JSON
Other Utilities:
- curl, head, sort, tail, clear, which, export, chmod, echo, hostname, kill, ln, xxd, alias, false, getconf, true, loadenv, wasm, xdg-open, command, exit, source
</system_constraints>
<code_formatting_info>
@@ -84,6 +109,36 @@ You are Bolt, an expert AI assistant and exceptional senior software developer w
</${MODIFICATIONS_TAG_NAME}>
</diff_spec>
<chain_of_thought_instructions>
Before providing a solution, BRIEFLY outline your implementation steps. This helps ensure systematic thinking and clear communication. Your planning should:
- List concrete steps you'll take
- Identify key components needed
- Note potential challenges
- Be concise (2-4 lines maximum)
Example responses:
User: "Create a todo list app with local storage"
Assistant: "Sure. I'll start by:
1. Set up Vite + React
2. Create TodoList and TodoItem components
3. Implement localStorage for persistence
4. Add CRUD operations
Let's start now.
[Rest of response...]"
User: "Help debug why my API calls aren't working"
Assistant: "Great. My first steps will be:
1. Check network requests
2. Verify API endpoint format
3. Examine error handling
[Rest of response...]"
</chain_of_thought_instructions>
<artifact_info>
Bolt creates a SINGLE, comprehensive artifact for each project. The artifact contains all necessary steps and components, including:

@@ -1,8 +1,10 @@
// @ts-nocheck
// Preventing TS checks with files presented in the video for a better presentation.
import { streamText as _streamText, convertToCoreMessages } from 'ai';
import { getAPIKey } from '~/lib/.server/llm/api-key';
import { getAnthropicModel } from '~/lib/.server/llm/model';
import { getModel } from '~/lib/.server/llm/model';
import { MAX_TOKENS } from './constants';
import { getSystemPrompt } from './prompts';
import { MODEL_LIST, DEFAULT_MODEL, DEFAULT_PROVIDER } from '~/utils/constants';
interface ToolResult<Name extends string, Args, Result> {
toolCallId: string;
@@ -15,21 +17,50 @@ interface Message {
role: 'user' | 'assistant';
content: string;
toolInvocations?: ToolResult<string, unknown, unknown>[];
model?: string;
}
export type Messages = Message[];
export type StreamingOptions = Omit<Parameters<typeof _streamText>[0], 'model'>;
function extractModelFromMessage(message: Message): { model: string; content: string } {
const modelRegex = /^\[Model: (.*?)\]\n\n/;
const match = message.content.match(modelRegex);
if (match) {
const model = match[1];
const content = message.content.replace(modelRegex, '');
return { model, content };
}
// Default model if not specified
return { model: DEFAULT_MODEL, content: message.content };
}
export function streamText(messages: Messages, env: Env, options?: StreamingOptions) {
let currentModel = DEFAULT_MODEL;
const processedMessages = messages.map((message) => {
if (message.role === 'user') {
const { model, content } = extractModelFromMessage(message);
if (model && MODEL_LIST.find((m) => m.name === model)) {
currentModel = model; // Update the current model
}
return { ...message, content };
}
return message;
});
const provider = MODEL_LIST.find((model) => model.name === currentModel)?.provider || DEFAULT_PROVIDER;
return _streamText({
model: getAnthropicModel(getAPIKey(env)),
model: getModel(provider, currentModel, env),
system: getSystemPrompt(),
maxTokens: MAX_TOKENS,
headers: {
'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
},
messages: convertToCoreMessages(messages),
// headers: {
// 'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
// },
messages: convertToCoreMessages(processedMessages),
...options,
});
}

@@ -128,7 +128,9 @@ export class ActionRunner {
const webcontainer = await this.#webcontainer;
const process = await webcontainer.spawn('jsh', ['-c', action.content]);
const process = await webcontainer.spawn('jsh', ['-c', action.content], {
env: { npm_config_yes: true },
});
action.abortSignal.addEventListener('abort', () => {
process.kill();

@@ -9,6 +9,9 @@ import { EditorStore } from './editor';
import { FilesStore, type FileMap } from './files';
import { PreviewsStore } from './previews';
import { TerminalStore } from './terminal';
import JSZip from 'jszip';
import { saveAs } from 'file-saver';
import { Octokit } from "@octokit/rest";
export interface ArtifactState {
id: string;
@@ -271,6 +274,171 @@ export class WorkbenchStore {
const artifacts = this.artifacts.get();
return artifacts[id];
}
async downloadZip() {
const zip = new JSZip();
const files = this.files.get();
for (const [filePath, dirent] of Object.entries(files)) {
if (dirent?.type === 'file' && !dirent.isBinary) {
// remove '/home/project/' from the beginning of the path
const relativePath = filePath.replace(/^\/home\/project\//, '');
// split the path into segments
const pathSegments = relativePath.split('/');
// if there's more than one segment, we need to create folders
if (pathSegments.length > 1) {
let currentFolder = zip;
for (let i = 0; i < pathSegments.length - 1; i++) {
currentFolder = currentFolder.folder(pathSegments[i])!;
}
currentFolder.file(pathSegments[pathSegments.length - 1], dirent.content);
} else {
// if there's only one segment, it's a file in the root
zip.file(relativePath, dirent.content);
}
}
}
const content = await zip.generateAsync({ type: 'blob' });
saveAs(content, 'project.zip');
}
async syncFiles(targetHandle: FileSystemDirectoryHandle) {
const files = this.files.get();
const syncedFiles = [];
for (const [filePath, dirent] of Object.entries(files)) {
if (dirent?.type === 'file' && !dirent.isBinary) {
const relativePath = filePath.replace(/^\/home\/project\//, '');
const pathSegments = relativePath.split('/');
let currentHandle = targetHandle;
for (let i = 0; i < pathSegments.length - 1; i++) {
currentHandle = await currentHandle.getDirectoryHandle(pathSegments[i], { create: true });
}
// create or get the file
const fileHandle = await currentHandle.getFileHandle(pathSegments[pathSegments.length - 1], { create: true });
// write the file content
const writable = await fileHandle.createWritable();
await writable.write(dirent.content);
await writable.close();
syncedFiles.push(relativePath);
}
}
return syncedFiles;
}
async pushToGitHub(repoName: string, githubUsername: string, ghToken: string) {
try {
// Get the GitHub auth token from environment variables
const githubToken = ghToken;
const owner = githubUsername;
if (!githubToken) {
throw new Error('GitHub token is not set in environment variables');
}
// Initialize Octokit with the auth token
const octokit = new Octokit({ auth: githubToken });
// Check if the repository already exists before creating it
let repo;
try {
repo = await octokit.repos.get({ owner: owner, repo: repoName });
} catch (error) {
if (error instanceof Error && 'status' in error && error.status === 404) {
// Repository doesn't exist, so create a new one
const { data: newRepo } = await octokit.repos.createForAuthenticatedUser({
name: repoName,
private: false,
auto_init: true,
});
repo = newRepo;
} else {
console.log('cannot create repo!');
throw error; // Some other error occurred
}
}
// Get all files
const files = this.files.get();
if (!files || Object.keys(files).length === 0) {
throw new Error('No files found to push');
}
// Create blobs for each file
const blobs = await Promise.all(
Object.entries(files).map(async ([filePath, dirent]) => {
if (dirent?.type === 'file' && dirent.content) {
const { data: blob } = await octokit.git.createBlob({
owner: repo.owner.login,
repo: repo.name,
content: Buffer.from(dirent.content).toString('base64'),
encoding: 'base64',
});
return { path: filePath.replace(/^\/home\/project\//, ''), sha: blob.sha };
}
})
);
const validBlobs = blobs.filter(Boolean); // Filter out any undefined blobs
if (validBlobs.length === 0) {
throw new Error('No valid files to push');
}
// Get the latest commit SHA (assuming main branch, update dynamically if needed)
const { data: ref } = await octokit.git.getRef({
owner: repo.owner.login,
repo: repo.name,
ref: `heads/${repo.default_branch || 'main'}`, // Handle dynamic branch
});
const latestCommitSha = ref.object.sha;
// Resolve the tree of the latest commit: createTree's base_tree expects a tree SHA, not a commit SHA
const { data: latestCommit } = await octokit.git.getCommit({
owner: repo.owner.login,
repo: repo.name,
commit_sha: latestCommitSha,
});
// Create a new tree containing the uploaded blobs
const { data: newTree } = await octokit.git.createTree({
owner: repo.owner.login,
repo: repo.name,
base_tree: latestCommit.tree.sha,
tree: validBlobs.map((blob) => ({
path: blob!.path,
mode: '100644',
type: 'blob',
sha: blob!.sha,
})),
});
// Create a new commit
const { data: newCommit } = await octokit.git.createCommit({
owner: repo.owner.login,
repo: repo.name,
message: 'Initial commit from your app',
tree: newTree.sha,
parents: [latestCommitSha],
});
// Update the reference
await octokit.git.updateRef({
owner: repo.owner.login,
repo: repo.name,
ref: `heads/${repo.default_branch || 'main'}`, // Handle dynamic branch
sha: newCommit.sha,
});
alert(`Repository created and code pushed: ${repo.html_url}`);
} catch (error) {
console.error('Error pushing to GitHub:', error instanceof Error ? error.message : String(error));
}
}
}
export const workbenchStore = new WorkbenchStore();


@ -1,3 +1,5 @@
// @ts-nocheck
// Prevent TS checks on files shown in the video, to keep the presentation clean.
import { type ActionFunctionArgs } from '@remix-run/cloudflare';
import { MAX_RESPONSE_SEGMENTS, MAX_TOKENS } from '~/lib/.server/llm/constants';
import { CONTINUE_PROMPT } from '~/lib/.server/llm/prompts';

6
app/routes/api.models.ts Normal file

@ -0,0 +1,6 @@
import { json } from '@remix-run/cloudflare';
import { MODEL_LIST } from '~/utils/constants';
export async function loader() {
return json(MODEL_LIST);
}
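// Hypothetical client usage: `await fetch('/api/models').then((r) => r.json())`
// yields the ModelInfo[] assembled in ~/utils/constants.ts.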

3
app/types/global.d.ts vendored Normal file

@ -0,0 +1,3 @@
interface Window {
showDirectoryPicker(): Promise<FileSystemDirectoryHandle>;
}


@ -1,3 +1,111 @@
import type { ModelInfo, OllamaApiResponse, OllamaModel } from './types';
export const WORK_DIR_NAME = 'project';
export const WORK_DIR = `/home/${WORK_DIR_NAME}`;
export const MODIFICATIONS_TAG_NAME = 'bolt_file_modifications';
export const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/;
export const DEFAULT_MODEL = 'claude-3-5-sonnet-20240620';
export const DEFAULT_PROVIDER = 'Anthropic';
const staticModels: ModelInfo[] = [
{ name: 'claude-3-5-sonnet-20240620', label: 'Claude 3.5 Sonnet', provider: 'Anthropic' },
{ name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI' },
{ name: 'anthropic/claude-3.5-sonnet', label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)', provider: 'OpenRouter' },
{ name: 'anthropic/claude-3-haiku', label: 'Anthropic: Claude 3 Haiku (OpenRouter)', provider: 'OpenRouter' },
{ name: 'deepseek/deepseek-coder', label: 'Deepseek-Coder V2 236B (OpenRouter)', provider: 'OpenRouter' },
{ name: 'google/gemini-flash-1.5', label: 'Google Gemini Flash 1.5 (OpenRouter)', provider: 'OpenRouter' },
{ name: 'google/gemini-pro-1.5', label: 'Google Gemini Pro 1.5 (OpenRouter)', provider: 'OpenRouter' },
{ name: 'mistralai/mistral-nemo', label: 'Mistral Nemo (OpenRouter)', provider: 'OpenRouter' },
{ name: 'qwen/qwen-110b-chat', label: 'Qwen 110b Chat (OpenRouter)', provider: 'OpenRouter' },
{ name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter' },
{ name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google' },
{ name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google' },
{ name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq' },
{ name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq' },
{ name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq' },
{ name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq' },
{ name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq' },
{ name: 'claude-3-opus-20240229', label: 'Claude 3 Opus', provider: 'Anthropic' },
{ name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic' },
{ name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic' },
{ name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI' },
{ name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI' },
{ name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI' },
{ name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI' },
{ name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek' },
{ name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek' },
{ name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral' },
{ name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral' },
{ name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral' },
{ name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral' },
{ name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral' },
{ name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral' },
{ name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral' },
{ name: 'codestral-latest', label: 'Codestral', provider: 'Mistral' },
{ name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral' },
];
export let MODEL_LIST: ModelInfo[] = [...staticModels];
const getOllamaBaseUrl = () => {
const defaultBaseUrl = import.meta.env.OLLAMA_API_BASE_URL || 'http://localhost:11434';
// Check if we're in the browser
if (typeof window !== 'undefined') {
// Frontend always uses localhost
return defaultBaseUrl;
}
// Backend: Check if we're running in Docker
const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
return isDocker
? defaultBaseUrl.replace('localhost', 'host.docker.internal')
: defaultBaseUrl;
};
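// Example: with RUNNING_IN_DOCKER=true on the server, 'http://localhost:11434'
// is rewritten to 'http://host.docker.internal:11434'.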
async function getOllamaModels(): Promise<ModelInfo[]> {
try {
const base_url = getOllamaBaseUrl();
const response = await fetch(`${base_url}/api/tags`);
const data = await response.json() as OllamaApiResponse;
return data.models.map((model: OllamaModel) => ({
name: model.name,
label: `${model.name} (${model.details.parameter_size})`,
provider: 'Ollama',
}));
} catch (e) {
return [];
}
}
async function getOpenAILikeModels(): Promise<ModelInfo[]> {
try {
const base_url = import.meta.env.OPENAI_LIKE_API_BASE_URL || '';
if (!base_url) {
return [];
}
const api_key = import.meta.env.OPENAI_LIKE_API_KEY ?? '';
const response = await fetch(`${base_url}/models`, {
headers: {
Authorization: `Bearer ${api_key}`,
},
});
const res = (await response.json()) as any;
return res.data.map((model: any) => ({
name: model.id,
label: model.id,
provider: 'OpenAILike',
}));
} catch (e) {
return [];
}
}
async function initializeModelList(): Promise<void> {
const ollamaModels = await getOllamaModels();
const openAiLikeModels = await getOpenAILikeModels();
MODEL_LIST = [...ollamaModels, ...openAiLikeModels, ...staticModels];
}
initializeModelList();
export { getOllamaModels, getOpenAILikeModels, initializeModelList };
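// Note: initializeModelList() above runs fire-and-forget, so MODEL_LIST may not
// include Ollama/OpenAILike entries immediately; a consumer needing them can
// await it explicitly, e.g. `await initializeModelList();` before reading MODEL_LIST.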

28
app/utils/types.ts Normal file

@ -0,0 +1,28 @@
interface OllamaModelDetails {
parent_model: string;
format: string;
family: string;
families: string[];
parameter_size: string;
quantization_level: string;
}
export interface OllamaModel {
name: string;
model: string;
modified_at: string;
size: number;
digest: string;
details: OllamaModelDetails;
}
export interface OllamaApiResponse {
models: OllamaModel[];
}
export interface ModelInfo {
name: string;
label: string;
provider: string;
}

61
docker-compose.yaml Normal file

@ -0,0 +1,61 @@
services:
bolt-ai:
image: bolt-ai:production
build:
context: .
dockerfile: Dockerfile
target: bolt-ai-production
ports:
- "5173:5173"
env_file: ".env.local"
environment:
- NODE_ENV=production
- COMPOSE_PROFILES=production
# Not strictly needed, but serves as hints for Coolify
- PORT=5173
- GROQ_API_KEY=${GROQ_API_KEY}
- OPENAI_API_KEY=${OPENAI_API_KEY}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
- OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY}
- GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY}
- OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
- VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
- RUNNING_IN_DOCKER=true
extra_hosts:
- "host.docker.internal:host-gateway"
command: pnpm run dockerstart
profiles:
- production # This service only runs in the production profile
bolt-ai-dev:
image: bolt-ai:development
build:
target: bolt-ai-development
environment:
- NODE_ENV=development
- VITE_HMR_PROTOCOL=ws
- VITE_HMR_HOST=localhost
- VITE_HMR_PORT=5173
- CHOKIDAR_USEPOLLING=true
- WATCHPACK_POLLING=true
- PORT=5173
- GROQ_API_KEY=${GROQ_API_KEY}
- OPENAI_API_KEY=${OPENAI_API_KEY}
- ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
- OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY}
- GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY}
- OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
- VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
- RUNNING_IN_DOCKER=true
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
- type: bind
source: .
target: /app
consistency: cached
- /app/node_modules
ports:
- "5173:5173" # Same port, no conflict as only one runs at a time
command: pnpm run dev --host 0.0.0.0
profiles: ["development", "default"] # Make development the default profile
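# Hypothetical invocations with the Compose v2 CLI:
#   docker compose --profile development up   # dev service with hot reload
#   docker compose --profile production up    # production image on port 5173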


@ -3,7 +3,6 @@
"description": "StackBlitz AI Agent",
"private": true,
"license": "MIT",
"packageManager": "pnpm@9.4.0",
"sideEffects": false,
"type": "module",
"scripts": {
@ -15,6 +14,10 @@
"lint": "eslint --cache --cache-location ./node_modules/.cache/eslint .",
"lint:fix": "npm run lint -- --fix",
"start": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings",
"dockerstart": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings --ip 0.0.0.0 --port 5173 --no-show-interactive-dev-session",
"dockerrun": "docker run -it -d --name bolt-ai-live -p 5173:5173 --env-file .env.local bolt-ai",
"dockerbuild:prod": "docker build -t bolt-ai:production bolt-ai:latest --target bolt-ai-production .",
"dockerbuild": "docker build -t bolt-ai:development -t bolt-ai:latest --target bolt-ai-development .",
"typecheck": "tsc",
"typegen": "wrangler types",
"preview": "pnpm run build && pnpm run start"
@ -24,6 +27,9 @@
},
"dependencies": {
"@ai-sdk/anthropic": "^0.0.39",
"@ai-sdk/google": "^0.0.52",
"@ai-sdk/openai": "^0.0.66",
"@ai-sdk/mistral": "^0.0.43",
"@codemirror/autocomplete": "^6.17.0",
"@codemirror/commands": "^6.6.0",
"@codemirror/lang-cpp": "^6.0.2",
@ -43,6 +49,9 @@
"@iconify-json/svg-spinners": "^1.1.2",
"@lezer/highlight": "^1.2.0",
"@nanostores/react": "^0.7.2",
"@octokit/rest": "^21.0.2",
"@octokit/types": "^13.6.1",
"@openrouter/ai-sdk-provider": "^0.0.5",
"@radix-ui/react-dialog": "^1.1.1",
"@radix-ui/react-dropdown-menu": "^2.1.1",
"@remix-run/cloudflare": "^2.10.2",
@ -54,14 +63,17 @@
"@xterm/addon-fit": "^0.10.0",
"@xterm/addon-web-links": "^0.11.0",
"@xterm/xterm": "^5.5.0",
"ai": "^3.3.4",
"ai": "^3.4.9",
"date-fns": "^3.6.0",
"diff": "^5.2.0",
"file-saver": "^2.0.5",
"framer-motion": "^11.2.12",
"isbot": "^4.1.0",
"istextorbinary": "^9.5.0",
"jose": "^5.6.3",
"jszip": "^3.10.1",
"nanostores": "^0.10.3",
"ollama-ai-provider": "^0.15.2",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-hotkeys-hook": "^4.5.0",
@ -71,9 +83,9 @@
"rehype-raw": "^7.0.0",
"rehype-sanitize": "^6.0.0",
"remark-gfm": "^4.0.0",
"remix-island": "^0.2.0",
"remix-utils": "^7.6.0",
"shiki": "^1.9.1",
"remix-island": "^0.2.0",
"unist-util-visit": "^5.0.0"
},
"devDependencies": {
@ -81,12 +93,14 @@
"@cloudflare/workers-types": "^4.20240620.0",
"@remix-run/dev": "^2.10.0",
"@types/diff": "^5.2.1",
"@types/file-saver": "^2.0.7",
"@types/react": "^18.2.20",
"@types/react-dom": "^18.2.7",
"fast-glob": "^3.3.2",
"is-ci": "^3.0.1",
"node-fetch": "^3.3.2",
"prettier": "^3.3.2",
"sass-embedded": "^1.80.3",
"typescript": "^5.5.2",
"unified": "^11.0.5",
"unocss": "^0.61.3",
@ -100,5 +114,6 @@
},
"resolutions": {
"@typescript-eslint/utils": "^8.0.0-alpha.30"
}
},
"packageManager": "pnpm@9.12.2+sha512.22721b3a11f81661ae1ec68ce1a7b879425a1ca5b991c975b074ac220b187ce56c708fe5db69f4c962c989452eee76c82877f4ee80f474cebd61ee13461b6228"
}

File diff suppressed because it is too large


@ -1,6 +1,6 @@
import { cloudflareDevProxyVitePlugin as remixCloudflareDevProxy, vitePlugin as remixVitePlugin } from '@remix-run/dev';
import UnoCSS from 'unocss/vite';
import { defineConfig } from 'vite';
import { defineConfig, type ViteDevServer } from 'vite';
import { nodePolyfills } from 'vite-plugin-node-polyfills';
import { optimizeCssModules } from 'vite-plugin-optimize-css-modules';
import tsconfigPaths from 'vite-tsconfig-paths';
@ -24,7 +24,42 @@ export default defineConfig((config) => {
}),
UnoCSS(),
tsconfigPaths(),
chrome129IssuePlugin(),
config.mode === 'production' && optimizeCssModules({ apply: 'build' }),
],
envPrefix: ['VITE_', 'OPENAI_LIKE_API_', 'OLLAMA_API_BASE_URL'],
css: {
preprocessorOptions: {
scss: {
api: 'modern-compiler',
},
},
},
};
});
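// Dev-server-only middleware: Chrome 129 breaks JavaScript module loading under
// Vite local development, so affected browsers get an explanatory page instead.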
function chrome129IssuePlugin() {
return {
name: 'chrome129IssuePlugin',
configureServer(server: ViteDevServer) {
server.middlewares.use((req, res, next) => {
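// Extract the major Chrome/Chromium version from the User-Agent header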
const raw = req.headers['user-agent']?.match(/Chrom(e|ium)\/([0-9]+)\./);
if (raw) {
const version = parseInt(raw[2], 10);
if (version === 129) {
res.setHeader('content-type', 'text/html');
res.end(
'<body><h1>Please use Chrome Canary for testing.</h1><p>Chrome 129 has an issue with JavaScript modules & Vite local development, see <a href="https://github.com/stackblitz/bolt.new/issues/86#issuecomment-2395519258">for more information.</a></p><p><b>Note:</b> This only impacts <u>local development</u>. `pnpm run build` and `pnpm run start` will work fine in this browser.</p></body>',
);
return;
}
}
next();
});
},
};
}


@ -1,3 +1,10 @@
interface Env {
ANTHROPIC_API_KEY: string;
OPENAI_API_KEY: string;
GROQ_API_KEY: string;
OPEN_ROUTER_API_KEY: string;
OLLAMA_API_BASE_URL: string;
OPENAI_LIKE_API_KEY: string;
OPENAI_LIKE_API_BASE_URL: string;
DEEPSEEK_API_KEY: string;
}


@ -3,3 +3,4 @@ name = "bolt"
compatibility_flags = ["nodejs_compat"]
compatibility_date = "2024-07-01"
pages_build_output_dir = "./build/client"
send_metrics = false