diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index a594bc8..37ebae5 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -56,6 +56,16 @@ body:
         - OS: [e.g. macOS, Windows, Linux]
         - Browser: [e.g. Chrome, Safari, Firefox]
         - Version: [e.g. 91.1]
+  - type: input
+    id: provider
+    attributes:
+      label: Provider Used
+      description: Tell us the provider you are using.
+  - type: input
+    id: model
+    attributes:
+      label: Model Used
+      description: Tell us the model you are using.
   - type: textarea
     id: additional
     attributes:
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 16a5b72..c9eb890 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -16,10 +16,10 @@ jobs:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           stale-issue-message: "This issue has been marked as stale due to inactivity. If no further activity occurs, it will be closed in 7 days."
           stale-pr-message: "This pull request has been marked as stale due to inactivity. If no further activity occurs, it will be closed in 7 days."
-          days-before-stale: 14 # Number of days before marking an issue or PR as stale
-          days-before-close: 7 # Number of days after being marked stale before closing
+          days-before-stale: 10 # Number of days before marking an issue or PR as stale
+          days-before-close: 4 # Number of days after being marked stale before closing
           stale-issue-label: "stale" # Label to apply to stale issues
           stale-pr-label: "stale" # Label to apply to stale pull requests
           exempt-issue-labels: "pinned,important" # Issues with these labels won't be marked stale
           exempt-pr-labels: "pinned,important" # PRs with these labels won't be marked stale
-          operations-per-run: 90 # Limits the number of actions per run to avoid API rate limits
+          operations-per-run: 75 # Limits the number of actions per run to avoid API rate limits
diff --git a/.husky/pre-commit b/.husky/pre-commit
index 966a4ad..05fe9ee 100644
--- a/.husky/pre-commit
+++ b/.husky/pre-commit
@@ -2,6 +2,9 @@
 
 echo "🔍 Running pre-commit hook to check the code looks good... 🔍"
 
+export NVM_DIR="$HOME/.nvm"
+[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # Load nvm if you're using it
+
 if ! pnpm typecheck; then
   echo "❌ Type checking failed! Please review TypeScript types."
   echo "Once you're done, don't forget to add your changes to the commit! 🚀"
@@ -9,7 +12,7 @@ if ! pnpm typecheck; then
 fi
 
 if ! pnpm lint; then
-  echo "❌ Linting failed! 'pnpm lint:check' will help you fix the easy ones."
+  echo "❌ Linting failed! 'pnpm lint:fix' will help you fix the easy ones."
   echo "Once you're done, don't forget to add your beautification to the commit! 🤩"
   exit 1
 fi
diff --git a/README.md b/README.md
index 9acf882..33f861f 100644
--- a/README.md
+++ b/README.md
@@ -4,10 +4,13 @@
 This fork of Bolt.new (oTToDev) allows you to choose the LLM that you use for each prompt! Currently, you can use OpenAI, Anthropic, Ollama, OpenRouter, Gemini, LMStudio, Mistral, xAI, HuggingFace, DeepSeek, or Groq models - and it is easily extended to use any other model supported by the Vercel AI SDK! See the instructions below for running this locally and extending it to include more models.
 
+Check the [oTToDev Docs](https://coleam00.github.io/bolt.new-any-llm/) for more information.
+
 ## Join the community for oTToDev!
 
 https://thinktank.ottomator.ai
 
+
 ## Requested Additions - Feel Free to Contribute!
 
 - ✅ OpenRouter Integration (@coleam00)
@@ -31,23 +34,24 @@ https://thinktank.ottomator.ai
 - ✅ Ability to revert code to earlier version (@wonderwhy-er)
 - ✅ Cohere Integration (@hasanraiyan)
 - ✅ Dynamic model max token length (@hasanraiyan)
+- ✅ Better prompt enhancing (@SujalXplores)
 - ✅ Prompt caching (@SujalXplores)
 - ✅ Load local projects into the app (@wonderwhy-er)
 - ✅ Together Integration (@mouimet-infinisoft)
 - ✅ Mobile friendly (@qwikode)
 - ✅ Better prompt enhancing (@SujalXplores)
-- ⬜ **HIGH PRIORITY** - ALMOST DONE - Attach images to prompts (@atrokhym)
+- ✅ Attach images to prompts (@atrokhym)
 - ⬜ **HIGH PRIORITY** - Prevent Bolt from rewriting files as often (file locking and diffs)
 - ⬜ **HIGH PRIORITY** - Better prompting for smaller LLMs (code window sometimes doesn't start)
 - ⬜ **HIGH PRIORITY** - Run agents in the backend as opposed to a single model call
-- ⬜ Azure Open AI API Integration
-- ⬜ Perplexity Integration
-- ⬜ Vertex AI Integration
 - ⬜ Deploy directly to Vercel/Netlify/other similar platforms
 - ⬜ Have LLM plan the project in a MD file for better results/transparency
 - ⬜ VSCode Integration with git-like confirmations
 - ⬜ Upload documents for knowledge - UI design templates, a code base to reference coding style, etc.
 - ⬜ Voice prompting
+- ⬜ Azure Open AI API Integration
+- ⬜ Perplexity Integration
+- ⬜ Vertex AI Integration
 
 ## Bolt.new: AI-Powered Full-Stack Web Development in the Browser
diff --git a/app/components/chat/BaseChat.tsx b/app/components/chat/BaseChat.tsx
index 5e213a6..8c7589a 100644
--- a/app/components/chat/BaseChat.tsx
+++ b/app/components/chat/BaseChat.tsx
@@ -22,44 +22,9 @@ import { ExportChatButton } from '~/components/chat/chatExportAndImport/ExportCh
 import { ImportButtons } from '~/components/chat/chatExportAndImport/ImportButtons';
 import { ExamplePrompts } from '~/components/chat/ExamplePrompts';
 
-// @ts-ignore TODO: Introduce proper types
-// eslint-disable-next-line @typescript-eslint/no-unused-vars
-const ModelSelector = ({ model, setModel, provider, setProvider, modelList, providerList, apiKeys }) => {
-  return (
-
-
-
-
-  );
-};
+import FilePreview from './FilePreview';
+import { ModelSelector } from '~/components/chat/ModelSelector';
+import { SpeechRecognitionButton } from '~/components/chat/SpeechRecognition';
 
 const TEXTAREA_MIN_HEIGHT = 76;
 
@@ -85,6 +50,10 @@ interface BaseChatProps {
   enhancePrompt?: () => void;
   importChat?: (description: string, messages: Message[]) => Promise<void>;
   exportChat?: () => void;
+  uploadedFiles?: File[];
+  setUploadedFiles?: (files: File[]) => void;
+  imageDataList?: string[];
+  setImageDataList?: (dataList: string[]) => void;
 }
 
 export const BaseChat = React.forwardRef<HTMLTextAreaElement, BaseChatProps>(
@@ -96,20 +65,24 @@ (
       showChat = true,
       chatStarted = false,
       isStreaming = false,
-      enhancingPrompt = false,
-      promptEnhanced = false,
-      messages,
-      input = '',
       model,
       setModel,
       provider,
       setProvider,
-      sendMessage,
+      input = '',
+      enhancingPrompt,
       handleInputChange,
+      promptEnhanced,
       enhancePrompt,
+      sendMessage,
       handleStop,
       importChat,
       exportChat,
+      uploadedFiles = [],
+      setUploadedFiles,
+      imageDataList = [],
+      setImageDataList,
+      messages,
     },
     ref,
   ) => {
@@ -117,7 +90,11 @@
     const [apiKeys, setApiKeys] = useState<Record<string, string>>({});
     const [modelList, setModelList] = useState(MODEL_LIST);
     const [isModelSettingsCollapsed, setIsModelSettingsCollapsed] = useState(false);
+    const [isListening, setIsListening] = useState(false);
+    const [recognition, setRecognition] = useState<SpeechRecognition | null>(null);
+    const [transcript, setTranscript] = useState('');
+    console.log(transcript);
 
     useEffect(() => {
       // Load API keys from cookies on component mount
       try {
@@ -140,8 +117,72 @@
       initializeModelList().then((modelList) => {
         setModelList(modelList);
       });
+
+      if (typeof window !== 'undefined' && ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window)) {
+        const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+        const recognition = new SpeechRecognition();
+        recognition.continuous = true;
+        recognition.interimResults = true;
+
+        recognition.onresult = (event) => {
+          const transcript = Array.from(event.results)
+            .map((result) => result[0])
+            .map((result) => result.transcript)
+            .join('');
+
+          setTranscript(transcript);
+
+          if (handleInputChange) {
+            const syntheticEvent = {
+              target: { value: transcript },
+            } as React.ChangeEvent<HTMLTextAreaElement>;
+            handleInputChange(syntheticEvent);
+          }
+        };
+
+        recognition.onerror = (event) => {
+          console.error('Speech recognition error:', event.error);
+          setIsListening(false);
+        };
+
+        setRecognition(recognition);
+      }
     }, []);
 
+    const startListening = () => {
+      if (recognition) {
+        recognition.start();
+        setIsListening(true);
+      }
+    };
+
+    const stopListening = () => {
+      if (recognition) {
+        recognition.stop();
+        setIsListening(false);
+      }
+    };
+
+    const handleSendMessage = (event: React.UIEvent, messageInput?: string) => {
+      if (sendMessage) {
+        sendMessage(event, messageInput);
+
+        if (recognition) {
+          recognition.abort(); // Stop current recognition
+          setTranscript(''); // Clear transcript
+          setIsListening(false);
+
+          // Clear the input by triggering handleInputChange with empty value
+          if (handleInputChange) {
+            const syntheticEvent = {
+              target: { value: '' },
+            } as React.ChangeEvent<HTMLTextAreaElement>;
+            handleInputChange(syntheticEvent);
+          }
+        }
+      }
+    };
+
     const updateApiKey = (provider: string, key: string) => {
       try {
         const updatedApiKeys = { ...apiKeys, [provider]: key };
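Worth noting before the next hunk: TypeScript's default DOM typings do not include `SpeechRecognition` or `window.webkitSpeechRecognition`, so the speech-recognition code above only typechecks if the project carries ambient declarations (for example via `@types/dom-speech-recognition`). A minimal sketch of such declarations, covering only the members this component touches — these are an assumption, not part of the diff:

```ts
// Hypothetical ambient declarations; this diff does not ship them.
export {};

declare global {
  // Modeled loosely on the Web Speech API; only members used above are included.
  interface SpeechRecognition {
    continuous: boolean;
    interimResults: boolean;
    onresult: ((event: { results: ArrayLike<ArrayLike<{ transcript: string }>> }) => void) | null;
    onerror: ((event: { error: string }) => void) | null;
    start(): void;
    stop(): void;
    abort(): void;
  }

  interface Window {
    SpeechRecognition: new () => SpeechRecognition;
    webkitSpeechRecognition: new () => SpeechRecognition;
  }
}
```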
@@ -159,6 +200,58 @@
       }
     };
 
+    const handleFileUpload = () => {
+      const input = document.createElement('input');
+      input.type = 'file';
+      input.accept = 'image/*';
+
+      input.onchange = async (e) => {
+        const file = (e.target as HTMLInputElement).files?.[0];
+
+        if (file) {
+          const reader = new FileReader();
+
+          reader.onload = (e) => {
+            const base64Image = e.target?.result as string;
+            setUploadedFiles?.([...uploadedFiles, file]);
+            setImageDataList?.([...imageDataList, base64Image]);
+          };
+          reader.readAsDataURL(file);
+        }
+      };
+
+      input.click();
+    };
+
+    const handlePaste = async (e: React.ClipboardEvent) => {
+      const items = e.clipboardData?.items;
+
+      if (!items) {
+        return;
+      }
+
+      for (const item of items) {
+        if (item.type.startsWith('image/')) {
+          e.preventDefault();
+
+          const file = item.getAsFile();
+
+          if (file) {
+            const reader = new FileReader();
+
+            reader.onload = (e) => {
+              const base64Image = e.target?.result as string;
+              setUploadedFiles?.([...uploadedFiles, file]);
+              setImageDataList?.([...imageDataList, base64Image]);
+            };
+            reader.readAsDataURL(file);
+          }
+
+          break;
+        }
+      }
+    };
+
     const baseChat = (
       (
         )}
-
+            <FilePreview
+              files={uploadedFiles}
+              imageDataList={imageDataList}
+              onRemove={(index) => {
+                setUploadedFiles?.(uploadedFiles.filter((_, i) => i !== index));
+                setImageDataList?.(imageDataList.filter((_, i) => i !== index));
+              }}
+            />
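The `FilePreview` component is imported from `./FilePreview` but its implementation is not part of this diff. Going only by the props used above (`files`, `imageDataList`, `onRemove`), a minimal sketch might look like the following — the markup and class names are assumptions, not the actual component:

```tsx
// Hypothetical sketch of ./FilePreview; the real component is not shown in this diff.
import React from 'react';

interface FilePreviewProps {
  files: File[];
  imageDataList: string[];
  onRemove: (index: number) => void;
}

const FilePreview: React.FC<FilePreviewProps> = ({ files, imageDataList, onRemove }) => {
  if (files.length === 0) {
    return null;
  }

  return (
    <div className="flex flex-row gap-2 overflow-x-auto">
      {files.map((file, index) => (
        <div key={`${file.name}-${index}`} className="relative">
          {/* imageDataList holds the base64 data URLs produced by the FileReader calls above */}
          <img src={imageDataList[index]} alt={file.name} className="max-h-20 rounded" />
          <button onClick={() => onRemove(index)} aria-label={`Remove ${file.name}`}>
            ×
          </button>
        </div>
      ))}
    </div>
  );
};

export default FilePreview;
```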
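Likewise, `SpeechRecognitionButton` is imported from `~/components/chat/SpeechRecognition` but never defined in the diff. Given the `isListening` state and the `startListening`/`stopListening` handlers added above, a plausible sketch — the prop names here are assumptions:

```tsx
// Hypothetical sketch of SpeechRecognitionButton; not part of this diff.
import React from 'react';

interface SpeechRecognitionButtonProps {
  isListening: boolean;
  onStart: () => void;
  onStop: () => void;
  disabled?: boolean;
}

export const SpeechRecognitionButton: React.FC<SpeechRecognitionButtonProps> = ({
  isListening,
  onStart,
  onStop,
  disabled,
}) => (
  <button
    // Toggle between starting and stopping recognition based on the current state
    onClick={isListening ? onStop : onStart}
    disabled={disabled}
    title={isListening ? 'Stop listening' : 'Start speech recognition'}
  >
    {isListening ? 'Stop' : 'Speak'}
  </button>
);
```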