diff --git a/app/lib/.server/llm/openai-llm.ts b/app/lib/.server/llm/openai-llm.ts
index deb8a0c..621b376 100644
--- a/app/lib/.server/llm/openai-llm.ts
+++ b/app/lib/.server/llm/openai-llm.ts
@@ -28,23 +28,30 @@ export class OpenAILLM implements LLM {
}
const openai = createOpenAI({ apiKey: this.apiKey, compatibility: 'strict' });
- const model = openai('o1-mini');
+ type model_name_t = 'gpt-4o' | 'o1-mini';
+ const model_name: model_name_t = process.env.OPEN_AI_MODEL === 'gpt-4o' ? 'gpt-4o' : 'o1-mini';
+ const model = openai(model_name);
- const o1sysmessage: Message = {
- role: 'user',
- content: this.getPrompts().getSystemPrompt()
- };
+ if (model_name === 'o1-mini') {
+ const o1sysmessage: Message = {
+ role: 'user',
+ content: this.getPrompts().getSystemPrompt()
+ };
- // this is just some jank to get o1 working, proof of concept.
- // for 4o, update the model above, remove the o1sysmessage, and set the system prompt and maxTokens
-
- return _streamText({
- model: model as any, // Use type assertion to bypass strict type checking
- // system: this.getPrompts().getSystemPrompt(),
- messages: [o1sysmessage, ...convertToCoreMessages(messages)],
- // maxTokens: MAX_TOKENS,
- ...options,
- });
+ return _streamText({
+ model: model as any, // Use type assertion to bypass strict type checking
+ messages: [o1sysmessage, ...convertToCoreMessages(messages)],
+ ...options,
+ });
+ } else {
+ return _streamText({
+ model: model as any, // Use type assertion to bypass strict type checking
+ system: this.getPrompts().getSystemPrompt(),
+ messages: convertToCoreMessages(messages),
+ maxTokens: MAX_TOKENS,
+ ...options,
+ });
+ }
}
getPrompts(): Prompts {
diff --git a/app/lib/.server/llm/openai-prompts.ts b/app/lib/.server/llm/openai-prompts.ts
index c15881f..a991d65 100644
--- a/app/lib/.server/llm/openai-prompts.ts
+++ b/app/lib/.server/llm/openai-prompts.ts
@@ -36,7 +36,7 @@ export class OpenAIPrompts implements Prompts {
- Use 2 spaces for code indentation
+ Use 2 spaces for code indentation.
@@ -46,8 +46,8 @@ export class OpenAIPrompts implements Prompts {
For user-modified files, a \`<${MODIFICATIONS_TAG_NAME}>\` section will appear at the start of the user message, containing either \`<diff>\` or \`<file>\` elements for each modified file:
- - \`<diff>\`: Contains GNU unified diff format changes
- - \`<file>\`: Contains the full new content of the file
+ - \`<diff>\`: Contains GNU unified diff format changes.
+ - \`<file>\`: Contains the full new content of the file.
The system opts for \`<file>\` if the diff exceeds the new content size; otherwise, it uses \`<diff>\`.
@@ -55,13 +55,13 @@ export class OpenAIPrompts implements Prompts {
- The header with original and modified file names is omitted!
- Changed sections start with @@ -X,Y +A,B @@ where:
- - X: Original file starting line
- - Y: Original file line count
- - A: Modified file starting line
- - B: Modified file line count
- - (-) lines: Removed from the original
- - (+) lines: Added in the modified version
- - Unmarked lines: Unchanged context
+ - X: Original file starting line.
+ - Y: Original file line count.
+ - A: Modified file starting line.
+ - B: Modified file line count.
+ - (-) lines: Removed from the original.
+ - (+) lines: Added in the modified version.
+ - Unmarked lines: Unchanged context.
Example:
@@ -84,15 +84,15 @@ export class OpenAIPrompts implements Prompts {
// full file content here
- </${MODIFICATIONS_TAG_NAME}>
+ </${MODIFICATIONS_TAG_NAME}>
Bolt generates a SINGLE, comprehensive artifact for each project. This artifact includes all necessary steps and components, such as:
- - Shell commands to execute, including dependencies to install via a package manager (NPM)
- - Files to create along with their contents
- - Folders to create if required
+ - Shell commands to execute, including dependencies to install via a package manager (NPM).
+ - Files to create along with their contents.
+ - Folders to create if required.
1. CRITICAL: Think HOLISTICALLY and COMPREHENSIVELY BEFORE creating an artifact. This means: