first commits

This commit is contained in:
Sean Robinson
2025-06-02 10:40:01 -07:00
parent eda10b1212
commit 57cd69ddce
13 changed files with 1649 additions and 38 deletions

18
Dockerfile.dev Normal file
View File

@@ -0,0 +1,18 @@
# Use a Node image
FROM node:18

# Set working directory
WORKDIR /app

# Enable pnpm via corepack before copying sources so this layer is cached
RUN corepack enable && corepack prepare pnpm@latest --activate

# Copy only the dependency manifests first: this way the expensive
# 'pnpm install' layer is reused unless the dependencies themselves change.
# (The original 'COPY . .' before install invalidated the cache on every
# source edit, forcing a full reinstall on each rebuild.)
COPY package.json pnpm-lock.yaml* ./
RUN pnpm install

# Now copy the rest of the application source
COPY . .

# Expose the port your app uses
EXPOSE 8080

# Default command
CMD ["pnpm", "run", "dev"]

View File

@@ -74,10 +74,10 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
{!chatStarted && (
<div id="intro" className="mt-[26vh] max-w-chat mx-auto">
<h1 className="text-5xl text-center font-bold text-bolt-elements-textPrimary mb-2">
Where ideas begin
Pipeline Builder
</h1>
<p className="mb-4 text-center text-bolt-elements-textSecondary">
Bring ideas to life in seconds or get help on existing projects.
Data Extraction, Processing, Browser Operations and More.
</p>
</div>
)}
@@ -130,7 +130,7 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
minHeight: TEXTAREA_MIN_HEIGHT,
maxHeight: TEXTAREA_MAX_HEIGHT,
}}
placeholder="How can Bolt help you today?"
placeholder="What would you like to build?"
translate="no"
/>
<ClientOnly>

View File

@@ -18,10 +18,11 @@ export function Header() {
},
)}
>
<div className="flex items-center gap-2 z-logo text-bolt-elements-textPrimary cursor-pointer">
<div className="flex items-center gap-2 text-bolt-elements-textPrimary cursor-pointer">
<div className="i-ph:sidebar-simple-duotone text-xl" />
<a href="/" className="text-2xl font-semibold text-accent flex items-center">
<span className="i-bolt:logo-text?mask w-[46px] inline-block" />
{/* <span className="i-bolt:logo-text?mask w-[46px] inline-block" /> */}
Empromptu
</a>
</div>
<span className="flex-1 px-4 truncate text-center text-bolt-elements-textPrimary">

View File

@@ -5,5 +5,7 @@ export function getAnthropicModel(apiKey: string) {
apiKey,
});
return anthropic('claude-3-5-sonnet-20240620');
return anthropic('claude-sonnet-4-20250514');
// return anthropic('claude-3-7-sonnet-20250219');
// return anthropic('claude-3-5-sonnet-20240620');
}

View File

@@ -156,6 +156,16 @@ NEVER use the word "artifact". For example:
IMPORTANT: Use valid markdown only for all your responses and DO NOT use HTML tags except for artifacts!
Produce clean, minimal, luxury-feel interfaces using Tailwind CSS and Bootstrap utility classes. Every layout must be fully responsive (mobile-first), support dark mode and accessibility best practices (ARIA roles, sufficient color contrast). Start by strictly applying the chosen template patterns; allow end-users to customize later, but do not introduce new patterns on first render.
Before finalizing code, validate:
All layouts collapse gracefully below 640px width.
Text meets WCAG AA contrast ratios.
Buttons and inputs have focus styles and aria-labels.
Dark mode colors invert appropriately or use supplied DARK_* tokens.
All images have alt text or aria-hidden.
ULTRA IMPORTANT: Do NOT be verbose and DO NOT explain anything unless the user is asking for more information. That is VERY important.
ULTRA IMPORTANT: Think first and reply with the artifact that contains all necessary steps to set up the project, files, shell commands to run. It is SUPER IMPORTANT to respond with this first.
@@ -282,3 +292,881 @@ export const CONTINUE_PROMPT = stripIndents`
Continue your prior response. IMPORTANT: Immediately begin from where you left off without any interruptions.
Do not repeat any content, including artifact and action tags.
`;
export const API_CHATBOT_PROMPT = stripIndents`
You are an AI assistant that helps users solve problems using a powerful data pipeline API system. This system allows you to ingest data from multiple sources, process it with custom prompts, and create derived data objects for complex workflows.
Your task:
1. The user wants your help putting functions together to create an app that does some task and then returns results to them.
2. Ask questions until you can build a set of simple actions that solves the user's problem.
3. When you are fully confident you can answer, respond with a simple React/javascript code snippet that will allow the needed inputs and outputs, to be embedded into a larger app.
Only do this once when you know everything you need to, and include a plan using just the functions defined here as well as a simple description.
## Available API Endpoints (all API endpoints are at https://staging.impromptu-labs.com)
### 1. Data Ingestion: '/input_data'
**Purpose**: Import data from strings, files, or URLs into the system
**Method**: POST
**Parameters**:
- 'created_object_name' (string): Name for the data object to create/append to
- 'data_type' (string): Either "strings", "files", or "urls"
- 'input_data' (list): List of strings, file data, or URLs to process
**Supported File Types**: TXT, CSV, PDF, DOCX, XLS/XLSX
**URL Capability**: Robust web scraping that handles complex websites
**Example Usage**:
{
"created_object_name": "research_articles",
"data_type": "urls",
"input_data": ["https://example.com/article1", "https://example.com/article2"]
}
### 2. Data Processing: '/apply_prompt'
**Purpose**: Apply AI prompts to data combinations to generate new insights
**Method**: POST
**Parameters**:
- 'created_object_names' (list of strings): Names of new objects to create from results
- 'prompt_string' (string): Template with placeholders to match with input_object_name values.
- 'inputs' (list): Input specifications with object names and processing modes
**Processing Modes**:
- 'combine_events': Merge all data from an object into one combined text
- 'use_individually': Process each piece of data separately
- 'match_keys': Only combine data entries that share common tracking keys
**Example Usage**:
{
"created_object_names": ["summaries"],
"prompt_string": "Summarize this article: {research_articles} and extract key insights",
"inputs": [
{
"input_object_name": "research_articles",
"mode": "use_individually"
}
]
}
### 3. Data Management
- 'GET /return_data/{object_name}': Retrieve a specific data object and everything that helped create it. returns a key called "data" that has all other objects under it. for example, a returned value could be:
{
'data': [{'key_list': ['c6785e9e-0854-48c2-b580-cc3071280701',
'b466ad42-384e-4211-9210-1d16de55e0f3'],
'value': ['https://url1.com/',
'https://url2.com/']
}],
- 'DELETE /objects/{object_name}': Delete a data object
## Problem-Solving Approach
When a user presents a problem, follow this systematic approach:
### Step 1: Analyze the Problem
- What data sources are involved? (text, files, websites, etc.)
- What processing or analysis is needed?
- What output format or insights are desired?
- Are there multiple steps or transformations required?
### Step 2: Plan the Pipeline
1. **Data Ingestion**: Identify what needs to be imported and how
2. **Processing Steps**: Determine what prompts/transformations are needed
3. **Output Goals**: Define what final objects should be created
### Step 3: Execute the Solution
- Start with data ingestion using '/input_data'
- Apply processing steps using '/apply_prompt'
- Chain multiple processing steps if needed
- Verify results using the management endpoints
## Advanced Patterns
### Multi-Source Analysis
Combine data from different sources:
{
"created_object_names": ["analysis"],
"prompt_string": "Compare the information in {web_articles} with the data from {uploaded_reports} and identify discrepancies",
"inputs": [
{"input_object_name": "web_articles", "mode": "combine_events"},
{"input_object_name": "uploaded_reports", "mode": "combine_events"}
]
}
### Iterative Processing
Build complex workflows by chaining operations:
1. Ingest raw data → 'raw_data'
2. Extract key points → 'key_points'
3. Categorize points → 'categories'
4. Generate final report → 'final_report'
### Batch Processing
Process multiple items with different approaches:
- Use 'use_individually' for item-by-item processing
- Use 'combine_events' for aggregate analysis
- Use 'match_keys' for related data linking
## Example Problem-Solving Scenarios
### Research Analysis
**User**: "I need to analyze 10 research papers and create a literature review"
**Solution**:
1. Use '/input_data' with 'data_type: "urls"' or '"files"' to ingest papers
2. Use '/apply_prompt' with '"use_individually"' to summarize each paper
3. Use '/apply_prompt' with '"combine_events"' to create the literature review
### Competitive Intelligence
**User**: "Compare our product features with 5 competitor websites"
**Solution**:
1. Use '/input_data' with 'data_type: "urls"' to scrape competitor sites
2. Use '/input_data' with 'data_type: "strings"' to input your product info
3. Use '/apply_prompt' to extract features from each source
4. Use '/apply_prompt' to create comparison analysis
### Document Processing
**User**: "Extract action items from 20 meeting transcripts and categorize them"
**Solution**:
1. Use '/input_data' with 'data_type: "files"' to upload transcripts
2. Use '/apply_prompt' with '"use_individually"' to extract action items
3. Use '/apply_prompt' with '"combine_events"' to categorize and prioritize
## Best Practices
### Naming Conventions
- Use descriptive object names: '"meeting_transcripts"', '"competitor_analysis"'
- Include processing step in names: '"raw_articles"' → '"article_summaries"' → '"final_report"'
### Prompt Engineering
- Use clear placeholders: '{object_name}'
- Specify desired output format in prompts
- Include example outputs when helpful
- Request structured data (JSON) when building pipelines
### Error Handling
- Check object existence with 'GET /objects' before processing
- Use descriptive names to track data flow
- Test with small datasets first
### Efficiency
- Combine related processing steps when possible
- Use appropriate modes ('combine_events' vs 'use_individually')
- Consider the OpenAI API costs of large batch operations
## Your Role
As an AI assistant using this system:
1. **Listen carefully** to understand the user's goals
2. **Design efficient pipelines** that minimize API calls while maximizing insight
3. **Provide clear API calls** with proper JSON formatting
4. **Explain your reasoning** for the chosen approach
5. **Suggest follow-up steps** or alternative approaches when helpful
Remember: When you are confident you can write a working code snippet to accomplish the user's needs, return the token "[final]: " followed by the code and a brief description of what the code will accomplish.
`
export const API_CHATBOT_PROMPT_OLD = stripIndents`
You are AssistGPT, an expert helper.
Your task:
1. The user wants your help putting functions together to create an app that does some task and then returns results to them.
2. Ask questions until you can build a set of simple actions that solves the user's problem.
3. When you are fully confident you can answer, respond with the json object mentioned below.
Only call that function once and include a plan using just the functions defined here as well as a simple description.
The user has access to a system that can do the following actions:
- INPUT FUNCTIONS (Create Data Objects)
receive_text_from_files(object_name, files[])
Uploads one or more .txt, .pdf, .docx, .csv, .xlsx files. Extracts and stores content as rows { UID, data }.
receive_text_input(object_name, text_input)
Accepts raw text or a list of strings. Creates one row per entry with { UID, data }.
receive_structured_data(object_name, files[])
Uploads structured data. Each column becomes its own object, rows are linked via shared UID.
scrape_urls(object_name, urls)
Fetches visible text from each URL. Returns { UID, data, source_url }.
research_topics(object_name, topics)
Performs research using an AI agent for each topic. Saves responses as { UID, data, topic }.
use_prompt(object_name, prompts)
Sends each prompt to a GPT model. Stores outputs as { UID, data, prompt }.
- ENRICHMENT FUNCTIONS (Transform Objects)
All enrichment functions accept:
input_objects: str[] - One or more existing object names
output_object: str - Name to save result under
combine_all_objects: bool - Whether to merge all rows before processing
enrich_with_research(input_objects, output_object, research_request, combine_all_objects)
Appends research to each row's data, or to combined data if combine_all_objects = true.
enrich_with_prompt(input_objects, output_object, prompt_template, combine_all_objects)
Applies a GPT prompt using {value} as a placeholder for row data. Returns LLM responses.
extract_details_from_text(input_objects, output_object, extract_elements[], combine_all_objects)
Extracts structured JSON objects from text. Returns one row per result: { UID, data = JSON string }.
- MANAGEMENT FUNCTIONS (Retrieve or Modify Objects)
return_data(object_name)
Returns all data rows for a named object.
delete_object(object_name)
Deletes an object and all its data.
- CHAINING LOGIC
Data flows through a series of transformations:
[input] ➜ receive_* → Object A
➜ enrich_with_prompt(input=A) → Object B
➜ extract_details_from_text(input=B) → Object C
➜ return_data(C)
You may reuse object names across steps
Use combine_all_objects = true to treat rows as a single large input
Use UID to maintain traceability across chains
- CHAINING EXAMPLES
Scrape → Prompt → Extract
scrape_urls → "web_text"
enrich_with_prompt(input="web_text", prompt_template="Summarize this: {value}") → "summaries"
extract_details_from_text(input="summaries", extract_elements=["name", "event", "date"]) → "extracted_info"
return_data("extracted_info")
Upload CSV → Extract Columns → Prompt Per Row
receive_structured_data → "contract_party", "contract_amount"
enrich_with_prompt(input="contract_party", prompt_template="Describe the obligations of {value}") → "obligation_summary"
Use this registry to intelligently select inputs, chain enrichment tools, and extract structured results. You may call any function multiple times using different object names.
All of these endpoints are found at https://staging.impromptu-labs.com .
Given this, they would like to chain together just these functions, with these input variables, to do some task.
The user probably wants to send some data in as input to the app you will build, rather than right now.
Just specify your own internal names for object names, so long as they match up appropriately.
Don't bother asking the user for data right now, that data will go into the app you will build.
The JSON block you return once you have a plan should have this format:
{
"final_token": "[final]", # return this exactly
"user_requirements": ["requirement1", "requirement2",..(etc)],
"proposed_codesteps": ["function1", "function2(var1)",...(etc)]
}
Please interact with the user until you're sure the simplest list of API functions would accomplish their task and return the correct data, using the functions as a chain.
`;
export const API_CHATBOT_PROMPT_EXP2 = stripIndents`
# AI Instructions: Pipeline Processing System API
## Overview
You have access to a Pipeline Processing System that can execute complex data processing workflows by chaining together simple functions. This system allows you to read data from various sources, process it with AI, transform it, and combine it in sophisticated ways.
## Base URL
All API calls should be made to: 'https://staging.impromptu-labs.com'
## Core Concept
- **Everything is data**: text, files, lists, dictionaries, etc.
- **Everything is a function**: reading URLs, AI processing, transformations
- **Functions chain together**: output of one step becomes input to another
- **Use '$step_id' to reference previous step outputs**
## Available Functions
### Data Ingestion Functions
- 'create_data': Create literal data values
- 'read_url': Fetch content from a web URL
- 'read_file': Read uploaded file content
### AI Processing Functions
- 'ai_prompt': Apply an AI prompt to process data
- 'ai_extract': Extract structured data using AI with a schema
### Data Transformation Functions
- 'transform': Transform data (lowercase, extract_field, filter, flatten, unique, length)
- 'merge': Combine multiple data items (concatenate, list, first, last)
- 'split': Split data based on criteria
- 'map': Apply a function to each item in a list
- 'filter': Filter a list based on conditions
## API Endpoints
### 1. Create Pipeline
**POST** '/pipeline/create'
{
"name": "my_pipeline",
"steps": [
{
"id": "step1",
"function": "function_name",
"params": {
"param1": "value1",
"param2": "$previous_step_id"
}
}
]
}
### 2. Execute Pipeline
**POST** '/pipeline/{pipeline_id}/execute'
{
"input_data": {
"key1": "value1",
"key2": ["item1", "item2"]
}
}
### 3. Check Execution Status
**GET** '/execution/{execution_id}'
Returns execution status and results.
## Step-by-Step Usage Guide
### Step 1: Design Your Pipeline
Think about your task as a series of steps:
1. What data do you need? (URLs, files, literal values)
2. How should it be processed? (AI prompts, transformations)
3. How should results be combined or structured?
### Step 2: Create Pipeline Definition
Write a JSON definition with each step having:
- 'id': Unique identifier for this step
- 'function': Which function to use
- 'params': Parameters for the function (can reference previous steps with '$step_id')
### Step 3: Execute Pipeline
Send the pipeline definition to create it, then execute it with any input data.
## Common Patterns & Examples
### Pattern 1: Process Multiple URLs
{
"name": "multi_url_processing",
"steps": [
{
"id": "urls",
"function": "create_data",
"params": {"value": ["https://site1.com", "https://site2.com"]}
},
{
"id": "content",
"function": "map",
"params": {
"data": "$urls",
"operation": {
"function": "read_url",
"params": {"url": "$item"}
}
}
},
{
"id": "combined",
"function": "merge",
"params": {"data": "$content", "strategy": "concatenate"}
}
]
}
### Pattern 2: Extract Structured Data with AI
{
"id": "extract_people",
"function": "ai_extract",
"params": {
"data": "$text_content",
"schema": {
"type": "list",
"items": {
"name": "string",
"title": "string",
"company": "string",
"source_text": "string"
}
}
}
}
### Pattern 3: Process and Filter Data
{
"id": "filter_results",
"function": "filter",
"params": {
"data": "$extracted_data",
"condition": {
"field": "confidence",
"operator": "greater_than",
"value": 0.8
}
}
}
### Pattern 4: Transform Data
{
"id": "clean_text",
"function": "transform",
"params": {
"data": "$raw_text",
"operation": "lowercase"
}
}
## Function Reference
### create_data
**Purpose**: Create literal data values
{
"function": "create_data",
"params": {
"value": "any value - string, list, dict, etc."
}
}
### read_url
**Purpose**: Fetch content from a web URL
{
"function": "read_url",
"params": {
"url": "https://example.com"
}
}
### ai_prompt
**Purpose**: Process data with an AI prompt
{
"function": "ai_prompt",
"params": {
"data": "$input_data",
"prompt": "Summarize this text in 3 bullet points",
"model": "gpt-4"
}
}
### ai_extract
**Purpose**: Extract structured data using AI
{
"function": "ai_extract",
"params": {
"data": "$text_data",
"schema": {
"type": "list",
"items": {
"field1": "string",
"field2": "number"
}
}
}
}
### transform
**Purpose**: Transform data in various ways
{
"function": "transform",
"params": {
"data": "$input_data",
"operation": "lowercase|extract_field|filter|flatten|unique|length",
"field": "field_name", // for extract_field
"condition": {...} // for filter
}
}
### merge
**Purpose**: Combine multiple data items
{
"function": "merge",
"params": {
"data": ["$step1", "$step2", "$step3"],
"strategy": "concatenate|list|first|last"
}
}
### map
**Purpose**: Apply a function to each item in a list
{
"function": "map",
"params": {
"data": "$list_data",
"operation": {
"function": "function_name",
"params": {
"param1": "$item" // $item represents current list item
}
}
}
}
### filter
**Purpose**: Filter a list based on conditions
{
"function": "filter",
"params": {
"data": "$list_data",
"condition": {
"field": "field_name",
"operator": "equals|contains|greater_than|less_than",
"value": "comparison_value"
}
}
}
## Complete Example Workflows
### Example 1: Research Multiple Companies
**Task**: Get information about 3 companies from their websites and extract key facts
{
"name": "company_research",
"steps": [
{
"id": "companies",
"function": "create_data",
"params": {
"value": [
"https://apple.com/about",
"https://google.com/about",
"https://microsoft.com/about"
]
}
},
{
"id": "company_pages",
"function": "map",
"params": {
"data": "$companies",
"operation": {
"function": "read_url",
"params": {"url": "$item"}
}
}
},
{
"id": "company_facts",
"function": "map",
"params": {
"data": "$company_pages",
"operation": {
"function": "ai_extract",
"params": {
"data": "$item",
"schema": {
"type": "object",
"properties": {
"company_name": "string",
"founded_year": "number",
"headquarters": "string",
"main_products": "array",
"employee_count": "string"
}
}
}
}
}
}
]
}
### Example 2: Content Analysis Pipeline
**Task**: Analyze sentiment and extract topics from multiple blog posts
{
"name": "content_analysis",
"steps": [
{
"id": "blog_urls",
"function": "create_data",
"params": {
"value": [
"https://blog1.com/post1",
"https://blog2.com/post2"
]
}
},
{
"id": "blog_content",
"function": "map",
"params": {
"data": "$blog_urls",
"operation": {
"function": "read_url",
"params": {"url": "$item"}
}
}
},
{
"id": "sentiment_analysis",
"function": "map",
"params": {
"data": "$blog_content",
"operation": {
"function": "ai_prompt",
"params": {
"data": "$item",
"prompt": "Analyze the sentiment of this text. Return a JSON object with 'sentiment' (positive/negative/neutral), 'confidence' (0-1), and 'key_phrases' (array of important phrases)."
}
}
}
},
{
"id": "topic_extraction",
"function": "map",
"params": {
"data": "$blog_content",
"operation": {
"function": "ai_extract",
"params": {
"data": "$item",
"schema": {
"type": "array",
"items": {
"topic": "string",
"relevance_score": "number",
"keywords": "array"
}
}
}
}
}
},
{
"id": "final_report",
"function": "merge",
"params": {
"data": ["$sentiment_analysis", "$topic_extraction"],
"strategy": "list"
}
}
]
}
## Error Handling
- If a step fails, the entire pipeline stops and returns an error
- Check execution status with GET '/execution/{execution_id}'
- Error details will be in the response
## Best Practices
1. **Use descriptive step IDs**: Make them meaningful (e.g., "extract_emails" not "step3")
2. **Chain logically**: Each step should build on previous ones
3. **Reference correctly**: Always use '$step_id' format to reference previous steps
4. **Handle lists properly**: Use 'map' to process each item in a list individually
5. **Structure AI prompts clearly**: Be specific about desired output format
6. **Use appropriate merge strategies**:
- "concatenate" for combining text or lists
- "list" to keep items separate
- "first"/"last" to pick specific items
7. **Filter early**: Apply filters as soon as possible to reduce data processing
## Quick Start Checklist
1. ✅ Identify your data sources (URLs, files, literal values)
2. ✅ Plan your processing steps (AI prompts, transformations)
3. ✅ Write pipeline JSON with proper step references
4. ✅ POST to '/pipeline/create'
5. ✅ POST to '/pipeline/{id}/execute'
6. ✅ GET '/execution/{id}' to check results
Remember: Start simple and build complexity gradually. Test each step before adding the next one.
`;
export const INJECTED_PROMPT_1_EXP2 = stripIndents`[INJECTED_PROMPT_1]
Please review this API spec and be absolutely sure that you are calling those functions with the appropriate data formats, for example ensuring that you are sending object_name values, encapsulating input correctly in json, and using the exact function endpoints as they were defined.
As a reminder, all API endpoints are at https://staging.impromptu-labs.com .
## Available Functions
### Data Ingestion Functions
- 'create_data': Create literal data values
- 'read_url': Fetch content from a web URL
- 'read_file': Read uploaded file content
### AI Processing Functions
- 'ai_prompt': Apply an AI prompt to process data
- 'ai_extract': Extract structured data using AI with a schema
### Data Transformation Functions
- 'transform': Transform data (lowercase, extract_field, filter, flatten, unique, length)
- 'merge': Combine multiple data items (concatenate, list, first, last)
- 'split': Split data based on criteria
- 'map': Apply a function to each item in a list
- 'filter': Filter a list based on conditions
`;
export const INJECTED_PROMPT_1 = stripIndents`[INJECTED_PROMPT_1]
Please review this API spec and be absolutely sure that you are calling those functions with the appropriate data formats, for example ensuring that you are sending object_name values, encapsulating input correctly in json, and using the exact function endpoints as they were defined.
As a reminder, all API endpoints are at https://staging.impromptu-labs.com .
And remember the actual API functions you have access to, and what they expect:
### 1. Data Ingestion: '/input_data'
**Purpose**: Import data from strings, files, or URLs into the system
**Method**: POST
**Parameters**:
- 'created_object_name' (string): Name for the data object to create/append to
- 'data_type' (string): Either "strings", "files", or "urls"
- 'input_data' (list): List of strings, file data, or URLs to process
### 2. Data Processing: '/apply_prompt'
**Purpose**: Apply AI prompts to data combinations to generate new insights
**Method**: POST
**Parameters**:
- 'created_object_names' (list of strings): Names of new objects to create from results
- 'prompt_string' (string): Template with placeholders to match with input_object_name values.
- 'inputs' (list): Input specifications with object names and processing modes
**Processing Modes**:
- 'combine_events': Merge all data from an object into one combined text
- 'use_individually': Process each piece of data separately
- 'match_keys': Only combine data entries that share common tracking keys
### 3. Data Management
- 'GET /return_data/{object_name}': Retrieve a specific data object and everything that helped create it. returns a key called "data" that has all other objects under it. for example, a returned value could be:
{
'data': [{'key_list': ['c6785e9e-0854-48c2-b580-cc3071280701',
'b466ad42-384e-4211-9210-1d16de55e0f3'],
'value': ['https://url1.com/',
'https://url2.com/']
}],
'object_name': 'organization_urls'}
so please note that 'value' is already parsed properly, and will generally be an array already.
- 'DELETE /objects/{object_name}': Delete a data object by name
Ensure that the necessary input and output controls are present to allow the user to run this code, sending in what they need to at the time.
Please also echo the exact API calls to the screen for debugging as they happen.
Also, please add a green button to show the results, and a red button that will delete the objects produced by the code.
`;
export const INJECTED_PROMPT_1_OLD = stripIndents`[INJECTED_PROMPT_1]
Please review this API spec and be absolutely sure that you are calling those functions with the appropriate data formats, for example ensuring that you are sending object_name values, encapsulating input correctly in json, and using the exact function endpoints as they were defined.
As a reminder, all API endpoints are at https://staging.impromptu-labs.com .
receive_text_from_files(object_name, files[])
Uploads one or more .txt, .pdf, .docx, .csv, .xlsx files. Extracts and stores content as rows { UID, data }.
receive_text_input(object_name, text_input)
Accepts raw text or a list of strings. Creates one row per entry with { UID, data }.
receive_structured_data(object_name, files[])
Uploads structured data. Each column becomes its own object, rows are linked via shared UID.
scrape_urls(object_name, urls)
Fetches visible text from each URL. Returns { UID, data, source_url }.
research_topics(object_name, topics)
Performs research using an AI agent for each topic. Saves responses as { UID, data, topic }.
use_prompt(object_name, prompts)
Sends each prompt to a GPT model. Stores outputs as { UID, data, prompt }.
enrich_with_research(input_objects, output_object, research_request, combine_all_objects)
Appends research to each row's data, or to combined data if combine_all_objects = true.
enrich_with_prompt(input_objects, output_object, prompt_template, combine_all_objects)
Applies a GPT prompt using {value} as a placeholder for row data. Returns LLM responses.
extract_details_from_text(input_objects, output_object, extract_elements[], combine_all_objects)
Extracts structured JSON objects from text. Returns one row per result: { UID, data = JSON string }.
return_data(object_name)
Returns all data rows for a named object. Returns one object with an element called "data" which is a list of json objects.
delete_object(object_name)
Deletes an object and all its data.
`;
// export const INJECTED_PROMPT_2 = stripIndents`[INJECTED_PROMPT_2] Rewrite the code using the Modernize Next-js Free design system:
// • Framework - Next.js 14 App Router + TypeScript
// • UI library - Material UI v5; style only with the sx prop and MUI components
// • Theme palette - primary #5D87FF, success #13DEB9, danger #FA896B, warning #FFAE1F
// • Layout - persistent 260 px left drawer + top AppBar + scrollable main; keep shadow-1 cards and 12-col responsive grid
// • Typography - Public Sans, 14 px base, 20 px h6, 32 px h4
// • File structure - components in /package/src/components/, pages in /package/src/app/ with PascalCase files
// • Write all components as arrow functions, export default, and type props explicitly`;
// export const INJECTED_PROMPT_2 = stripIndents`[INJECTED_PROMPT_2] Rewrite the code using the following design elements:
// • use React Dark mode, with clearly contrasting buttons and text
// • Layout - persistent 260 px left drawer + top AppBar + scrollable main
// • Typography - Public Sans, 14 px base, 20 px h6, 32 px h4
// • Interface - A button for each API-calling step, with an indicator that shows when each step is finished and the result.`;
export const INJECTED_PROMPT_2 = stripIndents`[INJECTED_PROMPT_2] Change the style of the app using the set of instructions below that are most relevant to the user task:
(For screens where users upload documents, extract structured data, and view outputs):
Generate a three-step Upload & Extract flow for seed-to-Series-B small-business brands.
• Step 1: File upload card with drag-and-drop and “Choose File” button, branded with some PRIMARY_COLOR of your choice.
• Step 2: Extraction progress screen showing a Tailwind-styled spinner, clear status message, and cancel option.
• Step 3: Data output table with Bootstrap table classes, sortable columns, and “Download CSV” button.
• Step 4: Ensure mobile breakpoints collapse sidebars into accordion panels; implement dark mode variants; include ARIA labels on all interactive elements.”
(For embedding a conversational AI widget into an existing portal):
Produce a chatbot UI panel that sits at bottom-right:
• Step 1: Minimal header bar with logo icon and “Help” label in some PRIMARY_COLOR of your choice.
• Step 2: Scrollable message window styled with alternating light/dark bubble backgrounds.
• Step 3: Input area with accessible placeholder text 'Ask me anything...', a send icon button, and an optional attachment button.
• Step 4: Ensure focus outlines, keyboard navigation, and proper aria-live regions for new messages.
• Step 5: Include a mobile view where the panel expands full-screen and a dark-mode toggle.”
(For workflows where users upload a document, enter a prompt themselves, then see a summary or output):
Design a three-column layout on desktop, single-column on mobile, for Upload + Prompt + Summary:
• Step 1: Upload Column: Drag-and-drop zone with dashed border and an upload progress bar.
• Step 2: Prompt Column: A text area with placeholder 'Enter instructions...', character count, and 'Run' button in green.
• Step 3: Summary Column: Shows AI-generated summary in a scrollable card with expandable sections.
• Step 4: Follow minimal luxe style: plenty of white space, 2xl rounded corners, soft shadows. Use Tailwind classes for spacing. Include dark-mode card variants. Add aria-describedby on summary sections.”
(General - for all workflows)
Do not use MUI icons, they break in this environment.
Please ensure that all text and windows have good contrast against their background.
Remember to re-install and run npm run dev after any changes.
`;

View File

@@ -5,7 +5,7 @@ import { Chat } from '~/components/chat/Chat.client';
import { Header } from '~/components/header/Header';
export const meta: MetaFunction = () => {
return [{ title: 'Bolt' }, { name: 'description', content: 'Talk with Bolt, an AI assistant from StackBlitz' }];
return [{ title: 'Emp2' }, { name: 'description', content: 'Talk with the AI assistant' }];
};
export const loader = () => json({});

View File

@@ -1,8 +1,11 @@
import { type ActionFunctionArgs } from '@remix-run/cloudflare';
import { MAX_RESPONSE_SEGMENTS, MAX_TOKENS } from '~/lib/.server/llm/constants';
import { CONTINUE_PROMPT } from '~/lib/.server/llm/prompts';
import { CONTINUE_PROMPT, API_CHATBOT_PROMPT, API_CHATBOT_PROMPT_EXP2, INJECTED_PROMPT_1 ,INJECTED_PROMPT_1_EXP2, INJECTED_PROMPT_2 } from '~/lib/.server/llm/prompts';
import { streamText, type Messages, type StreamingOptions } from '~/lib/.server/llm/stream-text';
import SwitchableStream from '~/lib/.server/llm/switchable-stream';
import { streamText as _streamText, convertToCoreMessages } from 'ai';
import { getAPIKey } from '~/lib/.server/llm/api-key';
import { getAnthropicModel } from '~/lib/.server/llm/model';
export async function action(args: ActionFunctionArgs) {
return chatAction(args);
@@ -10,37 +13,125 @@ export async function action(args: ActionFunctionArgs) {
async function chatAction({ context, request }: ActionFunctionArgs) {
const { messages } = await request.json<{ messages: Messages }>();
const stream = new SwitchableStream();
try {
const options: StreamingOptions = {
toolChoice: 'none',
onFinish: async ({ text: content, finishReason }) => {
if (finishReason !== 'length') {
return stream.close();
}
if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
throw Error('Cannot continue message: Maximum segments reached');
}
const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
messages.push({ role: 'assistant', content });
messages.push({ role: 'user', content: CONTINUE_PROMPT });
const result = await streamText(messages, context.cloudflare.env, options);
return stream.switchSource(result.toAIStream());
},
};
const result = await streamText(messages, context.cloudflare.env, options);
stream.switchSource(result.toAIStream());
// Check if we've already transitioned to the original agent
const hasTransitioned = checkIfAlreadyTransitioned(messages);
if (!hasTransitioned) {
// Use your agent first
console.log('Using your agent...');
// Create options with proper stream closing and transition detection
const yourAgentOptions: StreamingOptions = {
onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
console.log('Your agent finished with reason:', finishReason);
// Check if we should transition to original agent
if (checkIfShouldTransition(content)) {
console.log('Transition detected! Immediately injecting first prompt...');
// Add the assistant's response to messages
const updatedMessages: Messages = [...messages, { role: 'assistant' as const, content }];
// Inject the first prompt immediately
const injectedMessages = injectSinglePrompt(updatedMessages, 1);
// Continue with original agent using injected prompt
const originalAgentOptions: StreamingOptions = {
toolChoice: 'none',
onFinish: async ({ text: responseContent, finishReason: responseFinishReason }: { text: string; finishReason: string }) => {
if (responseFinishReason !== 'length') {
// After first prompt response, inject second prompt immediately
console.log('First prompt response complete, injecting second prompt...');
const messagesWithFirstResponse: Messages = [...injectedMessages, { role: 'assistant' as const, content: responseContent }];
const secondInjectedMessages = injectSinglePrompt(messagesWithFirstResponse, 2);
// Continue with second prompt
const secondPromptOptions: StreamingOptions = {
toolChoice: 'none',
onFinish: async ({ text: finalContent, finishReason: finalFinishReason }: { text: string; finishReason: string }) => {
if (finalFinishReason !== 'length') {
return stream.close();
}
// Handle continuation for second prompt if needed
if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
throw Error('Cannot continue message: Maximum segments reached');
}
secondInjectedMessages.push({ role: 'assistant' as const, content: finalContent });
secondInjectedMessages.push({ role: 'user' as const, content: CONTINUE_PROMPT });
const result = await streamText(secondInjectedMessages, context.cloudflare.env, secondPromptOptions);
return stream.switchSource(result.toAIStream());
},
};
const secondResult = await streamText(secondInjectedMessages, context.cloudflare.env, secondPromptOptions);
return stream.switchSource(secondResult.toAIStream());
}
// Handle continuation for first prompt if needed
if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
throw Error('Cannot continue message: Maximum segments reached');
}
injectedMessages.push({ role: 'assistant' as const, content: responseContent });
injectedMessages.push({ role: 'user' as const, content: CONTINUE_PROMPT });
const result = await streamText(injectedMessages, context.cloudflare.env, originalAgentOptions);
return stream.switchSource(result.toAIStream());
},
};
const originalResult = await streamText(injectedMessages, context.cloudflare.env, originalAgentOptions);
return stream.switchSource(originalResult.toAIStream());
}
// No transition - close normally
if (finishReason !== 'length') {
console.log('Closing stream - your agent finished without transition');
return stream.close();
}
// Handle continuation for your agent
if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
throw Error('Cannot continue message: Maximum segments reached');
}
const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
messages.push({ role: 'assistant' as const, content });
messages.push({ role: 'user' as const, content: CONTINUE_PROMPT });
const result = await streamTextWithYourAgent(messages, context.cloudflare.env, yourAgentOptions);
return stream.switchSource(result.toAIStream());
},
};
const result = await streamTextWithYourAgent(messages, context.cloudflare.env, yourAgentOptions);
stream.switchSource(result.toAIStream());
} else {
// We've already transitioned - normal original agent flow
console.log('Using original agent (already transitioned)...');
const options: StreamingOptions = {
toolChoice: 'none',
onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
if (finishReason !== 'length') {
return stream.close();
}
if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
throw Error('Cannot continue message: Maximum segments reached');
}
const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
messages.push({ role: 'assistant' as const, content });
messages.push({ role: 'user' as const, content: CONTINUE_PROMPT });
const result = await streamText(messages, context.cloudflare.env, options);
return stream.switchSource(result.toAIStream());
},
};
const result = await streamText(messages, context.cloudflare.env, options);
stream.switchSource(result.toAIStream());
}
return new Response(stream.readable, {
status: 200,
@@ -48,12 +139,583 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
contentType: 'text/plain; charset=utf-8',
},
});
} catch (error) {
console.log(error);
throw new Response(null, {
status: 500,
statusText: 'Internal Server Error',
});
}
}
// Simplified helper functions since we're handling injection inline now
/**
 * Streams a completion from the intake ("your") agent: the same Anthropic
 * model as the main pipeline, but with the intake system prompt swapped in.
 * Caller-supplied options are spread last so they can override any default
 * (notably the onFinish callback used for transition detection).
 */
function streamTextWithYourAgent(messages: Messages, env: Env, options?: StreamingOptions) {
  const model = getAnthropicModel(getAPIKey(env));
  const coreMessages = convertToCoreMessages(messages);

  return _streamText({
    model,
    system: getYourAgentSystemPrompt(),
    maxTokens: MAX_TOKENS,
    headers: {
      // Beta flag raising the output-token ceiling for Claude 3.5 Sonnet.
      'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
    },
    messages: coreMessages,
    ...options,
  });
}
/**
 * System prompt for the intake agent. The prompt is expected to make the
 * model emit a '[final]' marker when it is ready to hand off to the original
 * agent (see checkIfShouldTransition) — TODO confirm against the prompt text.
 */
function getYourAgentSystemPrompt(): string {
  return API_CHATBOT_PROMPT;
}
/**
 * Returns true once the conversation is in the post-transition phase:
 * an assistant turn has emitted the '[final]' hand-off marker AND at least
 * one of the follow-up prompts has already been injected as a user turn.
 * Either signal alone is not enough — both must be present.
 */
function checkIfAlreadyTransitioned(messages: Messages): boolean {
  let sawFinalMarker = false;
  let sawInjectedPrompt = false;

  for (const message of messages) {
    if (message.role === 'assistant' && message.content.includes('[final]')) {
      sawFinalMarker = true;
    } else if (
      message.role === 'user' &&
      (message.content.includes('[INJECTED_PROMPT_1]') || message.content.includes('[INJECTED_PROMPT_2]'))
    ) {
      sawInjectedPrompt = true;
    }
  }

  return sawFinalMarker && sawInjectedPrompt;
}
/**
 * Detects the intake agent's hand-off signal: a literal '[final]' marker
 * anywhere in the streamed response text.
 */
function checkIfShouldTransition(responseText: string): boolean {
  return /\[final\]/.test(responseText);
}
/**
 * Returns a copy of the conversation with one of the two follow-up prompts
 * appended as a user turn. Prompt 1 is injected right after the '[final]'
 * hand-off; prompt 2 after the response to prompt 1. The input array is
 * never mutated.
 */
function injectSinglePrompt(messages: Messages, promptNumber: 1 | 2): Messages {
  console.log(`Injecting prompt ${promptNumber} into messages`);

  const content = promptNumber === 1 ? INJECTED_PROMPT_1 : INJECTED_PROMPT_2;

  return [...messages, { role: 'user' as const, content }];
}
//////////////////////////
// async function chatAction({ context, request }: ActionFunctionArgs) {
// const { messages } = await request.json<{ messages: Messages }>();
// const stream = new SwitchableStream();
// try {
// // Check if we've already transitioned to the original agent
// const hasTransitioned = checkIfAlreadyTransitioned(messages);
// if (!hasTransitioned) {
// // Use your agent first
// console.log('Using your agent...');
// // Create options with proper stream closing
// const yourAgentOptions: StreamingOptions = {
// onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
// console.log('Your agent finished with reason:', finishReason);
// console.log('Response content:', content.substring(0, 100) + '...');
// // Check if we should transition to original agent
// if (checkIfShouldTransition(content)) {
// console.log('Transition detected - will switch on next message');
// }
// // Always close the stream when your agent finishes
// // (unless we need continuation due to length)
// if (finishReason !== 'length') {
// console.log('Closing stream - your agent finished');
// return stream.close();
// }
// // Handle continuation for length
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// messages.push({ role: 'assistant', content });
// messages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamTextWithYourAgent(messages, context.cloudflare.env, yourAgentOptions);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamTextWithYourAgent(messages, context.cloudflare.env, yourAgentOptions);
// stream.switchSource(result.toAIStream());
// } else {
// // We've transitioned - check if we need to inject prompts
// const injectionStatus = checkIfNeedsPromptInjection(messages);
// if (injectionStatus.needsInjection) {
// console.log(`Injecting prompt ${injectionStatus.whichPrompt} before using original agent...`);
// // Inject the single prompt
// const injectedMessages = injectSinglePrompt(messages, injectionStatus.whichPrompt!);
// // Run through original agent with injected prompt
// const options: StreamingOptions = {
// toolChoice: 'none',
// onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
// if (finishReason !== 'length') {
// return stream.close();
// }
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// injectedMessages.push({ role: 'assistant', content });
// injectedMessages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamText(injectedMessages, context.cloudflare.env, options);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamText(injectedMessages, context.cloudflare.env, options);
// stream.switchSource(result.toAIStream());
// } else {
// // Normal original agent flow
// console.log('Using original agent...');
// const options: StreamingOptions = {
// toolChoice: 'none',
// onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
// if (finishReason !== 'length') {
// return stream.close();
// }
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// messages.push({ role: 'assistant', content });
// messages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamText(messages, context.cloudflare.env, options);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamText(messages, context.cloudflare.env, options);
// stream.switchSource(result.toAIStream());
// }
// }
// return new Response(stream.readable, {
// status: 200,
// headers: {
// contentType: 'text/plain; charset=utf-8',
// },
// });
// } catch (error) {
// console.log(error);
// throw new Response(null, {
// status: 500,
// statusText: 'Internal Server Error',
// });
// }
// }
// // Updated helper function
// function streamTextWithYourAgent(messages: Messages, env: Env, options?: StreamingOptions) {
// return _streamText({
// model: getAnthropicModel(getAPIKey(env)),
// system: getYourAgentSystemPrompt(),
// maxTokens: MAX_TOKENS,
// headers: {
// 'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
// },
// messages: convertToCoreMessages(messages),
// ...options, // This will include the onFinish callback we pass in
// });
// }
// function getYourAgentSystemPrompt(): string {
// // Return your custom system prompt
// // Include instruction to end with [final] when ready to transition
// return API_CHATBOT_PROMPT;;
// }
// function checkIfAlreadyTransitioned(messages: Messages): boolean {
// // Check if any assistant message contains [final]
// return messages.some(msg =>
// msg.role === 'assistant' && msg.content.includes('[final]')
// );
// }
// function checkIfShouldTransition(responseText: string): boolean {
// return responseText.includes('[final]');
// }
// function checkIfNeedsPromptInjection(messages: Messages): { needsInjection: boolean; whichPrompt: 1 | 2 | null } {
// const transitionIndex = messages.findIndex(msg =>
// msg.role === 'assistant' && msg.content.includes('[final]')
// );
// if (transitionIndex === -1) {
// console.log('No transition found, no injection needed');
// return { needsInjection: false, whichPrompt: null };
// }
// console.log('Transition found at index:', transitionIndex);
// // Check what we've already injected after transition
// const messagesAfterTransition = messages.slice(transitionIndex + 1);
// console.log('Messages after transition:', messagesAfterTransition.length);
// const prompt1Messages = messagesAfterTransition.filter(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_1]')
// );
// const prompt2Messages = messagesAfterTransition.filter(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_2]')
// );
// console.log('Found prompt 1 messages:', prompt1Messages.length);
// console.log('Found prompt 2 messages:', prompt2Messages.length);
// if (prompt1Messages.length === 0) {
// console.log('Need to inject prompt 1');
// return { needsInjection: true, whichPrompt: 1 };
// } else if (prompt1Messages.length > 0 && prompt2Messages.length === 0) {
// // Check if we got a response to prompt 1
// const prompt1Index = messagesAfterTransition.findIndex(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_1]')
// );
// const hasResponseToPrompt1 = messagesAfterTransition.slice(prompt1Index + 1).some(msg =>
// msg.role === 'assistant'
// );
// console.log('Has response to prompt 1:', hasResponseToPrompt1);
// if (hasResponseToPrompt1) {
// console.log('Need to inject prompt 2');
// return { needsInjection: true, whichPrompt: 2 };
// } else {
// console.log('Waiting for response to prompt 1');
// return { needsInjection: false, whichPrompt: null };
// }
// }
// console.log('All prompts already injected');
// return { needsInjection: false, whichPrompt: null };
// }
// function injectSinglePrompt(messages: Messages, promptNumber: 1 | 2): Messages {
// const injectedMessages = [...messages];
// console.log('injecting a single prompt into messages:', messages);
// if (promptNumber === 1) {
// injectedMessages.push({
// role: 'user',
// content: '[INJECTED_PROMPT_1] Please review the API spec and be absolutely sure that you are calling those functions with the appropriate data formats, for example ensuring that you are sending object_name values, encapsulating input correctly in json, and using the exact function endpoints as they were defined.'
// });
// } else {
// injectedMessages.push({
// role: 'user',
// content: `[INJECTED_PROMPT_2] Rewrite the code using the Modernize Next-js Free design system:
// • Framework - Next.js 14 App Router + TypeScript
// • UI library - Material UI v5; style only with the sx prop and MUI components
// • Theme palette - primary #5D87FF, success #13DEB9, danger #FA896B, warning #FFAE1F
// • Layout - persistent 260 px left drawer + top AppBar + scrollable main; keep shadow-1 cards and 12-col responsive grid
// • Typography - Public Sans, 14 px base, 20 px h6, 32 px h4
// • File structure - components in /package/src/components/, pages in /package/src/app/ with PascalCase files
// • Write all components as arrow functions, export default, and type props explicitly`
// });
// }
// return injectedMessages;
// }
// async function chatAction({ context, request }: ActionFunctionArgs) {
// const { messages } = await request.json<{ messages: Messages }>();
// const stream = new SwitchableStream();
// try {
// // Check if we've already transitioned to the original agent
// const hasTransitioned = checkIfAlreadyTransitioned(messages);
// if (!hasTransitioned) {
// // Use your agent first
// console.log('Using your agent...');
// const result = await streamTextWithYourAgent(messages, context.cloudflare.env);
// // Collect the streamed response to check for [final] token
// let fullResponse = '';
// const responseStream = result.toAIStream();
// // We need to capture the response as it streams
// // This is a bit tricky with streaming - we might need to modify this approach
// const transformStream = new TransformStream({
// transform(chunk, controller) {
// const text = new TextDecoder().decode(chunk);
// fullResponse += text;
// controller.enqueue(chunk);
// },
// flush() {
// // After streaming is complete, check if we should transition
// if (checkIfShouldTransition(fullResponse)) {
// // We need to handle transition after this stream completes
// // This might require a different approach - see note below
// }
// }
// });
// // For now, let's return the stream and handle transition on next message
// stream.switchSource(responseStream);
// } else {
// // We've transitioned - check if we need to inject prompts
// const injectionStatus = checkIfNeedsPromptInjection(messages);
// if (injectionStatus.needsInjection) {
// console.log(`Injecting prompt ${injectionStatus.whichPrompt} before using original agent...`);
// // Inject the single prompt
// const injectedMessages = injectSinglePrompt(messages, injectionStatus.whichPrompt!);
// // Run through original agent with injected prompt
// const options: StreamingOptions = {
// toolChoice: 'none',
// onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
// if (finishReason !== 'length') {
// return stream.close();
// }
// // Handle continuation logic (same as original)
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// injectedMessages.push({ role: 'assistant', content });
// injectedMessages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamText(injectedMessages, context.cloudflare.env, options);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamText(injectedMessages, context.cloudflare.env, options);
// stream.switchSource(result.toAIStream());
// } else {
// // Normal original agent flow
// console.log('Using original agent...');
// const options: StreamingOptions = {
// toolChoice: 'none',
// onFinish: async ({ text: content, finishReason }: { text: string; finishReason: string }) => {
// if (finishReason !== 'length') {
// return stream.close();
// }
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// messages.push({ role: 'assistant', content });
// messages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamText(messages, context.cloudflare.env, options);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamText(messages, context.cloudflare.env, options);
// stream.switchSource(result.toAIStream());
// }
// }
// return new Response(stream.readable, {
// status: 200,
// headers: {
// contentType: 'text/plain; charset=utf-8',
// },
// });
// } catch (error) {
// console.log(error);
// throw new Response(null, {
// status: 500,
// statusText: 'Internal Server Error',
// });
// }
// }
// // Helper functions
// function streamTextWithYourAgent(messages: Messages, env: Env, options?: StreamingOptions) {
// // For now, copy of the original streamText - you can modify later
// return _streamText({
// model: getAnthropicModel(getAPIKey(env)),
// system: getYourAgentSystemPrompt(), // You'll need to create this
// maxTokens: MAX_TOKENS,
// headers: {
// 'anthropic-beta': 'max-tokens-3-5-sonnet-2024-07-15',
// },
// messages: convertToCoreMessages(messages),
// ...options,
// });
// }
// function getYourAgentSystemPrompt(): string {
// // Return your custom system prompt
// // Include instruction to end with [final] when ready to transition
// return API_CHATBOT_PROMPT;
// }
// function checkIfAlreadyTransitioned(messages: Messages): boolean {
// // Check if any assistant message contains [final]
// return messages.some(msg =>
// msg.role === 'assistant' && msg.content.includes('[final]')
// );
// }
// function checkIfShouldTransition(responseText: string): boolean {
// return responseText.includes('[final]');
// }
// function checkIfNeedsPromptInjection(messages: Messages): { needsInjection: boolean; whichPrompt: 1 | 2 | null } {
// const transitionIndex = messages.findIndex(msg =>
// msg.role === 'assistant' && msg.content.includes('[final]')
// );
// if (transitionIndex === -1) return { needsInjection: false, whichPrompt: null };
// // Check what we've already injected after transition
// const messagesAfterTransition = messages.slice(transitionIndex + 1);
// const hasPrompt1 = messagesAfterTransition.some(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_1]')
// );
// const hasPrompt2 = messagesAfterTransition.some(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_2]')
// );
// if (!hasPrompt1) {
// return { needsInjection: true, whichPrompt: 1 };
// } else if (hasPrompt1 && !hasPrompt2) {
// // Check if we got a response to prompt 1
// const prompt1Index = messagesAfterTransition.findIndex(msg =>
// msg.role === 'user' && msg.content.includes('[INJECTED_PROMPT_1]')
// );
// const hasResponseToPrompt1 = messagesAfterTransition.slice(prompt1Index + 1).some(msg =>
// msg.role === 'assistant'
// );
// if (hasResponseToPrompt1) {
// return { needsInjection: true, whichPrompt: 2 };
// }
// }
// return { needsInjection: false, whichPrompt: null };
// }
// function injectSinglePrompt(messages: Messages, promptNumber: 1 | 2): Messages {
// const injectedMessages = [...messages];
// if (promptNumber === 1) {
// injectedMessages.push({
// role: 'user',
// content: '[INJECTED_PROMPT_1] Please review the API spec and be absolutely sure that you are calling those functions with the appropriate data formats, for example ensuring that you are sending object_name values, encapsulating input correctly in json, and using the exact function endpoints as they were defined.'
// });
// } else {
// injectedMessages.push({
// role: 'user',
// content: `[INJECTED_PROMPT_2] Rewrite the code using the Modernize Next-js Free design system:
// • Framework - Next.js 14 App Router + TypeScript
// • UI library - Material UI v5; style only with the sx prop and MUI components
// • Theme palette - primary #5D87FF, success #13DEB9, danger #FA896B, warning #FFAE1F
// • Layout - persistent 260 px left drawer + top AppBar + scrollable main; keep shadow-1 cards and 12-col responsive grid
// • Typography - Public Sans, 14 px base, 20 px h6, 32 px h4
// • File structure - components in /package/src/components/, pages in /package/src/app/ with PascalCase files
// • Write all components as arrow functions, export default, and type props explicitly`
// });
// }
// return injectedMessages;
// }
// async function chatAction({ context, request }: ActionFunctionArgs) {
// const { messages } = await request.json<{ messages: Messages }>();
// const stream = new SwitchableStream();
// try {
// const options: StreamingOptions = {
// toolChoice: 'none',
// onFinish: async ({ text: content, finishReason }) => {
// if (finishReason !== 'length') {
// return stream.close();
// }
// if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
// throw Error('Cannot continue message: Maximum segments reached');
// }
// const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;
// console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
// messages.push({ role: 'assistant', content });
// messages.push({ role: 'user', content: CONTINUE_PROMPT });
// const result = await streamText(messages, context.cloudflare.env, options);
// return stream.switchSource(result.toAIStream());
// },
// };
// const result = await streamText(messages, context.cloudflare.env, options);
// stream.switchSource(result.toAIStream());
// return new Response(stream.readable, {
// status: 200,
// headers: {
// contentType: 'text/plain; charset=utf-8',
// },
// });
// } catch (error) {
// console.log(error);
// throw new Response(null, {
// status: 500,
// statusText: 'Internal Server Error',
// });
// }
// }

12
distributable/README.sh Normal file
View File

@@ -0,0 +1,12 @@
# Bolt.new - Docker App
## Prerequisites
- Docker installed (https://docs.docker.com/get-docker/)
## How to run:
1. Load the image and start the app
./setup_and_run.sh
2. Visit:
http://localhost:8080

9
distributable/setup_and_run.sh Executable file
View File

@@ -0,0 +1,9 @@
#!/bin/bash
# Load the pre-built bolt-new Docker image and start the app on port 8080.
# Abort on any failure so a bad install/load never falls through to `docker run`.
set -euo pipefail

# Step 0: Install Docker only when it is missing (the original always ran the
# install, and without -y it would hang waiting for interactive confirmation).
# NOTE(review): docker-ce packages come from Docker's own apt repository,
# which must already be configured on this machine — TODO confirm.
if ! command -v docker >/dev/null 2>&1; then
  sudo apt-get update
  sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin
fi

# Step 1: Decompress and load the docker image (bolt-new.tar.gz must sit next to this script)
sudo gunzip -c bolt-new.tar.gz | sudo docker load

# Step 2: Run the app (host port 8080 -> container port 8080)
sudo docker run -p 8080:8080 bolt-new

11
docker-compose.yml Normal file
View File

@@ -0,0 +1,11 @@
# Development compose file: builds the app image from Dockerfile.dev and
# serves the dev server on port 8080.
# NOTE(review): the top-level `version` key is obsolete in Compose V2 and is
# ignored (with a warning) — consider removing it.
version: "3"
services:
  bolt:
    build:
      context: .
      dockerfile: Dockerfile.dev
    ports:
      - "8080:8080" # host:container — matches EXPOSE 8080 in Dockerfile.dev
    volumes:
      - .:/app # bind-mount the source tree for live reload
      - /app/node_modules # anonymous volume so container deps aren't shadowed by the host

View File

@@ -9,7 +9,7 @@
"scripts": {
"deploy": "npm run build && wrangler pages deploy",
"build": "remix vite:build",
"dev": "remix vite:dev",
"dev": "remix vite:dev --host",
"test": "vitest --run",
"test:watch": "vitest",
"lint": "eslint --cache --cache-location ./node_modules/.cache/eslint .",

5
set_up_docker_build.sh Executable file
View File

@@ -0,0 +1,5 @@
#!/bin/bash
# Build the bolt-new image and package it as distributable/bolt-new.tar.gz.
# Abort on any failure — previously a failed `cd distributable` would let
# gzip run against the wrong directory.
set -euo pipefail

# Build from Dockerfile.dev without cache so the archive reflects the current tree.
sudo docker build -t bolt-new -f Dockerfile.dev --no-cache .

# Export the image, then compress it in place; -f overwrites any stale
# bolt-new.tar.gz left by a previous run (plain gzip would fail on it).
sudo docker save bolt-new > distributable/bolt-new.tar
sudo gzip -f distributable/bolt-new.tar

View File

@@ -10,6 +10,9 @@ export default defineConfig((config) => {
build: {
target: 'esnext',
},
server: {
port: 8080 // change here
},
plugins: [
nodePolyfills({
include: ['path', 'buffer'],