diff --git a/README.md b/README.md
index 600acc1d..33f861fa 100644
--- a/README.md
+++ b/README.md
@@ -40,18 +40,18 @@ https://thinktank.ottomator.ai
 - ✅ Together Integration (@mouimet-infinisoft)
 - ✅ Mobile friendly (@qwikode)
 - ✅ Better prompt enhancing (@SujalXplores)
-- ⬜ **HIGH PRIORITY** - ALMOST DONE - Attach images to prompts (@atrokhym)
+- ✅ Attach images to prompts (@atrokhym)
 - ⬜ **HIGH PRIORITY** - Prevent Bolt from rewriting files as often (file locking and diffs)
 - ⬜ **HIGH PRIORITY** - Better prompting for smaller LLMs (code window sometimes doesn't start)
 - ⬜ **HIGH PRIORITY** - Run agents in the backend as opposed to a single model call
-- ⬜ Azure Open AI API Integration
-- ⬜ Perplexity Integration
-- ⬜ Vertex AI Integration
 - ⬜ Deploy directly to Vercel/Netlify/other similar platforms
 - ⬜ Have LLM plan the project in a MD file for better results/transparency
 - ⬜ VSCode Integration with git-like confirmations
 - ⬜ Upload documents for knowledge - UI design templates, a code base to reference coding style, etc.
 - ⬜ Voice prompting
+- ⬜ Azure Open AI API Integration
+- ⬜ Perplexity Integration
+- ⬜ Vertex AI Integration
 
 ## Bolt.new: AI-Powered Full-Stack Web Development in the Browser

diff --git a/app/components/chat/Artifact.tsx b/app/components/chat/Artifact.tsx
index 682a4c76..3ca2508c 100644
--- a/app/components/chat/Artifact.tsx
+++ b/app/components/chat/Artifact.tsx
@@ -28,6 +28,7 @@ interface ArtifactProps {
 export const Artifact = memo(({ messageId }: ArtifactProps) => {
   const userToggledActions = useRef(false);
   const [showActions, setShowActions] = useState(false);
+  const [allActionFinished, setAllActionFinished] = useState(false);
 
   const artifacts = useStore(workbenchStore.artifacts);
   const artifact = artifacts[messageId];
@@ -47,6 +48,11 @@ export const Artifact = memo(({ messageId }: ArtifactProps) => {
     if (actions.length && !showActions && !userToggledActions.current) {
       setShowActions(true);
     }
+
+    if (actions.length !== 0) {
+      const finished = !actions.find((action) => action.status !== 'complete');
+      setAllActionFinished(finished);
+    }
   }, [actions]);
 
   return (
@@ -59,6 +65,18 @@ export const Artifact = memo(({ messageId }: ArtifactProps) => {
           workbenchStore.showWorkbench.set(!showWorkbench);
         }}
       >
+        {artifact.type == 'bundled' && (
+          <>
+            <div className="p-4">
+              {allActionFinished ? (
+                <div className={'i-ph:files-light'} style={{ fontSize: '2rem' }}></div>
+              ) : (
+                <div className={'i-svg-spinners:90-ring-with-bg'} style={{ fontSize: '2rem' }}></div>
+              )}
+            </div>
+            <div className="bg-bolt-elements-artifacts-borderColor w-[1px]" />
+          </>
+        )}
         <div className="px-5 p-3.5 w-full text-left">
           <div className="w-full text-bolt-elements-textPrimary font-medium leading-5 text-sm">{artifact?.title}</div>
           <div className="w-full text-bolt-elements-textSecondary text-xs mt-0.5">Click to open Workbench</div>
@@ -66,7 +84,7 @@ export const Artifact = memo(({ messageId }: ArtifactProps) => {
       </button>
       <div className="bg-bolt-elements-artifacts-borderColor w-[1px]" />
       <AnimatePresence>
-        {actions.length && (
+        {actions.length && artifact.type !== 'bundled' && (
           <motion.button
             initial={{ opacity: 0 }}
             animate={{ opacity: 1 }}
             exit={{ opacity: 0 }}
@@ … @@ export const Artifact = memo(({ messageId }: ArtifactProps) => {
-        {showActions && actions.length > 0 && (
+        {artifact.type !== 'bundled' && showActions && actions.length > 0 && (
           <motion.div
             className="actions"
             initial={{ height: 0 }}
             animate={{ height: 'auto' }}
             exit={{ height: '0px' }}
             transition={{ duration: 0.15 }}
           >
+            <div className="bg-bolt-elements-artifacts-borderColor h-[1px]" />
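
An aside on the Artifact.tsx hunks above (not part of the diff): the new `allActionFinished` flag drives the bundled artifact header's status icon, flipping from the in-progress spinner to the static files icon once every action settles. The check `!actions.find((action) => action.status !== 'complete')` is a double negative; a sketch of an equivalent, more direct formulation, with a stubbed `ActionState` standing in for the real workbench-store type:

```ts
// Stub for illustration only; the real ActionState lives in the workbench store.
interface ActionState {
  status: 'pending' | 'running' | 'complete' | 'failed' | 'aborted';
}

// Reads as the intent: "every action is complete".
// Equivalent to `!actions.find((a) => a.status !== 'complete')`.
function allActionsFinished(actions: ActionState[]): boolean {
  return actions.every((a) => a.status === 'complete');
}
```

Note that the `actions.length !== 0` guard in the hunk still matters: both formulations return `true` for an empty array, so without the guard an artifact whose actions haven't streamed in yet would briefly report itself finished.
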
diff --git a/app/components/chat/BaseChat.tsx b/app/components/chat/BaseChat.tsx
index b69460fc..a0a87f83 100644
--- a/app/components/chat/BaseChat.tsx
+++ b/app/components/chat/BaseChat.tsx
@@ -23,44 +23,9 @@ import { ImportButtons } from '~/components/chat/chatExportAndImport/ImportButtons';
 import { ExamplePrompts } from '~/components/chat/ExamplePrompts';
 import GitCloneButton from './GitCloneButton';
 
-// @ts-ignore TODO: Introduce proper types
-// eslint-disable-next-line @typescript-eslint/no-unused-vars
-const ModelSelector = ({ model, setModel, provider, setProvider, modelList, providerList, apiKeys }) => {
-  return (
-    <div className="mb-2 flex gap-2 flex-col sm:flex-row">
-      <select … >…</select>
-      <select … >…</select>
-    </div>
-  );
-};
+import FilePreview from './FilePreview';
+import { ModelSelector } from '~/components/chat/ModelSelector';
+import { SpeechRecognitionButton } from '~/components/chat/SpeechRecognition';
 
 const TEXTAREA_MIN_HEIGHT = 76;
 
@@ -86,6 +51,10 @@ interface BaseChatProps {
   enhancePrompt?: () => void;
   importChat?: (description: string, messages: Message[]) => Promise<void>;
   exportChat?: () => void;
+  uploadedFiles?: File[];
+  setUploadedFiles?: (files: File[]) => void;
+  imageDataList?: string[];
+  setImageDataList?: (dataList: string[]) => void;
 }
 
 export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
@@ -97,20 +66,24 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
       showChat = true,
       chatStarted = false,
       isStreaming = false,
-      enhancingPrompt = false,
-      promptEnhanced = false,
-      messages,
-      input = '',
       model,
       setModel,
       provider,
       setProvider,
-      sendMessage,
+      input = '',
+      enhancingPrompt,
       handleInputChange,
+      promptEnhanced,
       enhancePrompt,
+      sendMessage,
       handleStop,
       importChat,
       exportChat,
+      uploadedFiles = [],
+      setUploadedFiles,
+      imageDataList = [],
+      setImageDataList,
+      messages,
     },
     ref,
   ) => {
@@ -118,7 +91,11 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
     const [apiKeys, setApiKeys] = useState<Record<string, string>>({});
     const [modelList, setModelList] = useState(MODEL_LIST);
     const [isModelSettingsCollapsed, setIsModelSettingsCollapsed] = useState(false);
+    const [isListening, setIsListening] = useState(false);
+    const [recognition, setRecognition] = useState<SpeechRecognition | null>(null);
+    const [transcript, setTranscript] = useState('');
 
+    console.log(transcript);
     useEffect(() => {
       // Load API keys from cookies on component mount
       try {
@@ -141,8 +118,72 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
       initializeModelList().then((modelList) => {
         setModelList(modelList);
       });
+
+      if (typeof window !== 'undefined' && ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window)) {
+        const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
+        const recognition = new SpeechRecognition();
+        recognition.continuous = true;
+        recognition.interimResults = true;
+
+        recognition.onresult = (event) => {
+          const transcript = Array.from(event.results)
+            .map((result) => result[0])
+            .map((result) => result.transcript)
+            .join('');
+
+          setTranscript(transcript);
+
+          if (handleInputChange) {
+            const syntheticEvent = {
+              target: { value: transcript },
+            } as React.ChangeEvent<HTMLTextAreaElement>;
+            handleInputChange(syntheticEvent);
+          }
+        };
+
+        recognition.onerror = (event) => {
+          console.error('Speech recognition error:', event.error);
+          setIsListening(false);
+        };
+
+        setRecognition(recognition);
+      }
     }, []);
 
+    const startListening = () => {
+      if (recognition) {
+        recognition.start();
+        setIsListening(true);
+      }
+    };
+
+    const stopListening = () => {
+      if (recognition) {
+        recognition.stop();
+        setIsListening(false);
+      }
+    };
+
+    const handleSendMessage = (event: React.UIEvent, messageInput?: string) => {
+      if (sendMessage) {
+        sendMessage(event, messageInput);
+
+        if (recognition) {
+          recognition.abort(); // Stop current recognition
+          setTranscript(''); // Clear transcript
+          setIsListening(false);
+
+          // Clear the input by triggering handleInputChange with empty value
+          if (handleInputChange) {
+            const syntheticEvent = {
+              target: { value: '' },
+            } as React.ChangeEvent<HTMLTextAreaElement>;
+            handleInputChange(syntheticEvent);
+          }
+        }
+      }
+    };
+
     const updateApiKey = (provider: string, key: string) => {
       try {
         const updatedApiKeys = { ...apiKeys, [provider]: key };
@@ -160,6 +201,58 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
       }
     };
 
+    const handleFileUpload = () => {
+      const input = document.createElement('input');
+      input.type = 'file';
+      input.accept = 'image/*';
+
+      input.onchange = async (e) => {
+        const file = (e.target as HTMLInputElement).files?.[0];
+
+        if (file) {
+          const reader = new FileReader();
+
+          reader.onload = (e) => {
+            const base64Image = e.target?.result as string;
+            setUploadedFiles?.([...uploadedFiles, file]);
+            setImageDataList?.([...imageDataList, base64Image]);
+          };
+          reader.readAsDataURL(file);
+        }
+      };
+
+      input.click();
+    };
+
+    const handlePaste = async (e: React.ClipboardEvent) => {
+      const items = e.clipboardData?.items;
+
+      if (!items) {
+        return;
+      }
+
+      for (const item of items) {
+        if (item.type.startsWith('image/')) {
+          e.preventDefault();
+
+          const file = item.getAsFile();
+
+          if (file) {
+            const reader = new FileReader();
+
+            reader.onload = (e) => {
+              const base64Image = e.target?.result as string;
+              setUploadedFiles?.([...uploadedFiles, file]);
+              setImageDataList?.([...imageDataList, base64Image]);
+            };
+            reader.readAsDataURL(file);
+          }
+
+          break;
+        }
+      }
+    };
+
     const baseChat = (
       <div … >
@@ … @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
-              <div … >
+              <FilePreview
+                files={uploadedFiles}
+                imageDataList={imageDataList}
+                onRemove={(index) => {
+                  setUploadedFiles?.(uploadedFiles.filter((_, i) => i !== index));
+                  setImageDataList?.(imageDataList.filter((_, i) => i !== index));
+                }}
+              />
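
A few asides on the BaseChat.tsx changes above, none of them part of the diff. First, the voice-input wiring: the Web Speech API is vendor-prefixed in Chromium, which is why the effect probes both `SpeechRecognition` and `webkitSpeechRecognition` before constructing anything. The same lifecycle, condensed into a reusable hook as a sketch; it assumes speech-recognition DOM typings are available (e.g. via `@types/dom-speech-recognition`), and `useSpeechToText` is a hypothetical name, not something the PR adds:

```ts
import { useEffect, useRef, useState } from 'react';

// Sketch: one recognizer per mount, continuous listening,
// interim results streamed to the caller as they arrive.
export function useSpeechToText(onTranscript: (text: string) => void) {
  const recognitionRef = useRef<SpeechRecognition | null>(null);
  const [isListening, setIsListening] = useState(false);

  useEffect(() => {
    // Chromium exposes the API as webkitSpeechRecognition; some browsers have neither.
    const Ctor = window.SpeechRecognition ?? window.webkitSpeechRecognition;

    if (!Ctor) {
      return;
    }

    const recognition = new Ctor();
    recognition.continuous = true;
    recognition.interimResults = true;

    recognition.onresult = (event) => {
      // Join the best alternative of every result received so far.
      const transcript = Array.from(event.results)
        .map((result) => result[0].transcript)
        .join('');
      onTranscript(transcript);
    };

    recognition.onerror = () => setIsListening(false);

    recognitionRef.current = recognition;

    // Abort on unmount so the microphone is released.
    return () => recognition.abort();
  }, [onTranscript]);

  const start = () => {
    recognitionRef.current?.start();
    setIsListening(true);
  };

  const stop = () => {
    recognitionRef.current?.stop();
    setIsListening(false);
  };

  return { isListening, start, stop };
}
```

The component keeps its recognizer in `useState` instead of a ref, which also works; the important detail either way is calling `abort()` rather than `stop()` when a message is sent, so the recognizer discards pending results instead of firing one last `onresult` into the freshly cleared input.
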
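Second, the image-attach path: `handleFileUpload` and `handlePaste` end in the same three steps. They read the `File` with `FileReader.readAsDataURL`, append the file to `uploadedFiles`, and append the resulting base64 data URL to the parallel `imageDataList` array. A sketch of that shared tail factored into one helper (hypothetical names, not part of the diff):

```ts
// Promise wrapper around the FileReader callback dance used in both handlers.
function fileToDataUrl(file: File): Promise<string> {
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = () => resolve(reader.result as string); // "data:image/png;base64,..."
    reader.onerror = () => reject(reader.error);
    reader.readAsDataURL(file);
  });
}

// Usage, mirroring the optional-setter state updates in the diff.
async function attachImage(
  file: File,
  uploadedFiles: File[],
  imageDataList: string[],
  setUploadedFiles?: (files: File[]) => void,
  setImageDataList?: (dataList: string[]) => void,
) {
  const dataUrl = await fileToDataUrl(file);
  setUploadedFiles?.([...uploadedFiles, file]);
  setImageDataList?.([...imageDataList, dataUrl]);
}
```

The parallel-array shape is also why `FilePreview`'s `onRemove` filters both arrays by the same index: file N and data URL N must stay aligned, or previews would show the wrong image.
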
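Finally, a note on the synthetic-event pattern that appears twice above: the component drives the controlled textarea by handing `handleInputChange` a hand-built object cast to `React.ChangeEvent<HTMLTextAreaElement>`. A trimmed sketch of what that cast actually produces (the helper name is hypothetical; only `target.value` is populated):

```ts
import type React from 'react';

// Minimal stand-in for a change handler like BaseChat's handleInputChange.
type InputChangeHandler = (event: React.ChangeEvent<HTMLTextAreaElement>) => void;

// Push programmatic text (a transcript, or '' to clear) through the same
// code path as user typing. The object is NOT a real DOM event: anything
// reading event.currentTarget, calling preventDefault(), etc. would break.
function pushValue(handleInputChange: InputChangeHandler, value: string) {
  const syntheticEvent = {
    target: { value },
  } as React.ChangeEvent<HTMLTextAreaElement>;

  handleInputChange(syntheticEvent);
}
```

The cast is a pragmatic shortcut to reuse the existing controlled-input plumbing; it holds up only as long as the handler touches nothing beyond `event.target.value`.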