Mirror of https://github.com/stackblitz-labs/bolt.diy, synced 2025-01-22 10:55:34 +00:00
fix: updated logger and model caching minor bugfix #release (#895)
* fix: updated logger and model caching
* usage token stream issue fix
* minor changes
* updated starter template change to fix the app title
* starter template bugfix
* fixed hydration errors and raw logs
* removed raw log
* made auto select template false by default
* cleaner logs and updated logic to call dynamicModels only if not found in static models
* updated starter template instructions
* browser console log improved for Firefox
* provider icons fix
parent 389eedcac4
commit 6494f5ac2e
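In short, the model-caching change gives each provider an in-memory cache of its dynamically fetched model list, keyed by the inputs that could change that list, so repeated lookups skip the network. A minimal sketch of that pattern, condensed from the BaseProvider hunks in the diff below (the ModelInfo shape and the provider-settings type are simplified here):

// Minimal sketch of the per-provider dynamic-model cache introduced by this
// commit, condensed from the BaseProvider hunks below. ModelInfo is trimmed
// to just a name; the real type also carries label/provider/maxTokenAllowed.
interface ModelInfo {
  name: string;
}

interface CacheOptions {
  apiKeys?: Record<string, string>;
  providerSettings?: Record<string, unknown>; // simplified from IProviderSetting
  serverEnv?: Record<string, string>;
}

class CachedProvider {
  name = 'Example';
  cachedDynamicModels?: { cacheId: string; models: ModelInfo[] };

  // The cache key serializes everything that can change the fetched list,
  // scoped to this provider, so a new API key or setting invalidates it.
  getDynamicModelsCacheKey(options: CacheOptions) {
    return JSON.stringify({
      apiKeys: options.apiKeys?.[this.name],
      providerSettings: options.providerSettings?.[this.name],
      serverEnv: options.serverEnv,
    });
  }

  getModelsFromCache(options: CacheOptions): ModelInfo[] | null {
    if (!this.cachedDynamicModels) {
      return null;
    }

    // A key mismatch means the inputs changed: drop the cache and refetch.
    if (this.cachedDynamicModels.cacheId !== this.getDynamicModelsCacheKey(options)) {
      this.cachedDynamicModels = undefined;
      return null;
    }

    return this.cachedDynamicModels.models;
  }

  storeDynamicModels(options: CacheOptions, models: ModelInfo[]) {
    this.cachedDynamicModels = { cacheId: this.getDynamicModelsCacheKey(options), models };
  }
}

The LLMManager then consults getModelsFromCache before calling a provider's getDynamicModels, and only hits the network when the requested model is not already in the provider's static list or cache.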
@@ -168,30 +168,32 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
}, []);

useEffect(() => {
const providerSettings = getProviderSettings();
let parsedApiKeys: Record<string, string> | undefined = {};
if (typeof window !== 'undefined') {
const providerSettings = getProviderSettings();
let parsedApiKeys: Record<string, string> | undefined = {};

try {
parsedApiKeys = getApiKeysFromCookies();
setApiKeys(parsedApiKeys);
} catch (error) {
console.error('Error loading API keys from cookies:', error);
try {
parsedApiKeys = getApiKeysFromCookies();
setApiKeys(parsedApiKeys);
} catch (error) {
console.error('Error loading API keys from cookies:', error);

// Clear invalid cookie data
Cookies.remove('apiKeys');
// Clear invalid cookie data
Cookies.remove('apiKeys');
}
setIsModelLoading('all');
initializeModelList({ apiKeys: parsedApiKeys, providerSettings })
.then((modelList) => {
// console.log('Model List: ', modelList);
setModelList(modelList);
})
.catch((error) => {
console.error('Error initializing model list:', error);
})
.finally(() => {
setIsModelLoading(undefined);
});
}
setIsModelLoading('all');
initializeModelList({ apiKeys: parsedApiKeys, providerSettings })
.then((modelList) => {
console.log('Model List: ', modelList);
setModelList(modelList);
})
.catch((error) => {
console.error('Error initializing model list:', error);
})
.finally(() => {
setIsModelLoading(undefined);
});
}, [providerList]);

const onApiKeysChange = async (providerName: string, apiKey: string) => {
@@ -401,28 +403,32 @@ export const BaseChat = React.forwardRef<HTMLDivElement, BaseChatProps>(
<rect className={classNames(styles.PromptShine)} x="48" y="24" width="70" height="1"></rect>
</svg>
<div>
<div className={isModelSettingsCollapsed ? 'hidden' : ''}>
<ModelSelector
key={provider?.name + ':' + modelList.length}
model={model}
setModel={setModel}
modelList={modelList}
provider={provider}
setProvider={setProvider}
providerList={providerList || (PROVIDER_LIST as ProviderInfo[])}
apiKeys={apiKeys}
modelLoading={isModelLoading}
/>
{(providerList || []).length > 0 && provider && (
<APIKeyManager
provider={provider}
apiKey={apiKeys[provider.name] || ''}
setApiKey={(key) => {
onApiKeysChange(provider.name, key);
}}
/>
<ClientOnly>
{() => (
<div className={isModelSettingsCollapsed ? 'hidden' : ''}>
<ModelSelector
key={provider?.name + ':' + modelList.length}
model={model}
setModel={setModel}
modelList={modelList}
provider={provider}
setProvider={setProvider}
providerList={providerList || (PROVIDER_LIST as ProviderInfo[])}
apiKeys={apiKeys}
modelLoading={isModelLoading}
/>
{(providerList || []).length > 0 && provider && (
<APIKeyManager
provider={provider}
apiKey={apiKeys[provider.name] || ''}
setApiKey={(key) => {
onApiKeysChange(provider.name, key);
}}
/>
)}
</div>
)}
</div>
</ClientOnly>
</div>
<FilePreview
files={uploadedFiles}
@@ -168,7 +168,8 @@ export const ChatImpl = memo(
});
useEffect(() => {
const prompt = searchParams.get('prompt');
console.log(prompt, searchParams, model, provider);

// console.log(prompt, searchParams, model, provider);

if (prompt) {
setSearchParams({});
@@ -289,14 +290,14 @@ export const ChatImpl = memo(

// reload();

const template = await selectStarterTemplate({
const { template, title } = await selectStarterTemplate({
message: messageInput,
model,
provider,
});

if (template !== 'blank') {
const temResp = await getTemplates(template);
const temResp = await getTemplates(template, title);

if (temResp) {
const { assistantMessage, userMessage } = temResp;
@@ -6,9 +6,10 @@ import type { IProviderConfig } from '~/types/model';
import { logStore } from '~/lib/stores/logs';

// Import a default fallback icon
import DefaultIcon from '/icons/Default.svg'; // Adjust the path as necessary
import { providerBaseUrlEnvKeys } from '~/utils/constants';

const DefaultIcon = '/icons/Default.svg'; // Adjust the path as necessary

export default function ProvidersTab() {
const { providers, updateProviderSettings, isLocalModel } = useSettings();
const [filteredProviders, setFilteredProviders] = useState<IProviderConfig[]>([]);
@@ -5,7 +5,6 @@ import { renderToReadableStream } from 'react-dom/server';
import { renderHeadToString } from 'remix-island';
import { Head } from './root';
import { themeStore } from '~/lib/stores/theme';
import { initializeModelList } from '~/utils/constants';

export default async function handleRequest(
request: Request,
@@ -14,7 +13,7 @@ export default async function handleRequest(
remixContext: EntryContext,
_loadContext: AppLoadContext,
) {
await initializeModelList({});
// await initializeModelList({});

const readable = await renderToReadableStream(<RemixServer context={remixContext} url={request.url} />, {
signal: request.signal,
@@ -4,7 +4,6 @@ import { getSystemPrompt } from '~/lib/common/prompts/prompts';
import {
DEFAULT_MODEL,
DEFAULT_PROVIDER,
getModelList,
MODEL_REGEX,
MODIFICATIONS_TAG_NAME,
PROVIDER_LIST,
@@ -15,6 +14,8 @@ import ignore from 'ignore';
import type { IProviderSetting } from '~/types/model';
import { PromptLibrary } from '~/lib/common/prompt-library';
import { allowedHTMLElements } from '~/utils/markdown';
import { LLMManager } from '~/lib/modules/llm/manager';
import { createScopedLogger } from '~/utils/logger';

interface ToolResult<Name extends string, Args, Result> {
toolCallId: string;
@@ -142,6 +143,8 @@ function extractPropertiesFromMessage(message: Message): { model: string; provid
return { model, provider, content: cleanedContent };
}

const logger = createScopedLogger('stream-text');

export async function streamText(props: {
messages: Messages;
env: Env;
@@ -158,15 +161,10 @@ export async function streamText(props: {

let currentModel = DEFAULT_MODEL;
let currentProvider = DEFAULT_PROVIDER.name;
const MODEL_LIST = await getModelList({ apiKeys, providerSettings, serverEnv: serverEnv as any });
const processedMessages = messages.map((message) => {
if (message.role === 'user') {
const { model, provider, content } = extractPropertiesFromMessage(message);

if (MODEL_LIST.find((m) => m.name === model)) {
currentModel = model;
}

currentModel = model;
currentProvider = provider;

return { ...message, content };
@@ -183,12 +181,37 @@ export async function streamText(props: {
return message;
});

const modelDetails = MODEL_LIST.find((m) => m.name === currentModel);
const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;
const staticModels = LLMManager.getInstance().getStaticModelListFromProvider(provider);
let modelDetails = staticModels.find((m) => m.name === currentModel);

if (!modelDetails) {
const modelsList = [
...(provider.staticModels || []),
...(await LLMManager.getInstance().getModelListFromProvider(provider, {
apiKeys,
providerSettings,
serverEnv: serverEnv as any,
})),
];

if (!modelsList.length) {
throw new Error(`No models found for provider ${provider.name}`);
}

modelDetails = modelsList.find((m) => m.name === currentModel);

if (!modelDetails) {
// Fallback to first model
logger.warn(
`MODEL [${currentModel}] not found in provider [${provider.name}]. Falling back to first model. ${modelsList[0].name}`,
);
modelDetails = modelsList[0];
}
}

const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;

const provider = PROVIDER_LIST.find((p) => p.name === currentProvider) || DEFAULT_PROVIDER;

let systemPrompt =
PromptLibrary.getPropmtFromLibrary(promptId || 'default', {
cwd: WORK_DIR,
@@ -201,6 +224,8 @@ export async function streamText(props: {
systemPrompt = `${systemPrompt}\n\n ${codeContext}`;
}

logger.info(`Sending llm call to ${provider.name} with model ${modelDetails.name}`);

return _streamText({
model: provider.getModelInstance({
model: currentModel,
@@ -8,6 +8,10 @@ export abstract class BaseProvider implements ProviderInfo {
abstract name: string;
abstract staticModels: ModelInfo[];
abstract config: ProviderConfig;
cachedDynamicModels?: {
cacheId: string;
models: ModelInfo[];
};

getApiKeyLink?: string;
labelForGetApiKey?: string;
@@ -49,6 +53,54 @@ export abstract class BaseProvider implements ProviderInfo {
apiKey,
};
}
getModelsFromCache(options: {
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
serverEnv?: Record<string, string>;
}): ModelInfo[] | null {
if (!this.cachedDynamicModels) {
// console.log('no dynamic models',this.name);
return null;
}

const cacheKey = this.cachedDynamicModels.cacheId;
const generatedCacheKey = this.getDynamicModelsCacheKey(options);

if (cacheKey !== generatedCacheKey) {
// console.log('cache key mismatch',this.name,cacheKey,generatedCacheKey);
this.cachedDynamicModels = undefined;
return null;
}

return this.cachedDynamicModels.models;
}
getDynamicModelsCacheKey(options: {
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
serverEnv?: Record<string, string>;
}) {
return JSON.stringify({
apiKeys: options.apiKeys?.[this.name],
providerSettings: options.providerSettings?.[this.name],
serverEnv: options.serverEnv,
});
}
storeDynamicModels(
options: {
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
serverEnv?: Record<string, string>;
},
models: ModelInfo[],
) {
const cacheId = this.getDynamicModelsCacheKey(options);

// console.log('caching dynamic models',this.name,cacheId);
this.cachedDynamicModels = {
cacheId,
models,
};
}

// Declare the optional getDynamicModels method
getDynamicModels?(
@@ -2,7 +2,9 @@ import type { IProviderSetting } from '~/types/model';
import { BaseProvider } from './base-provider';
import type { ModelInfo, ProviderInfo } from './types';
import * as providers from './registry';
import { createScopedLogger } from '~/utils/logger';

const logger = createScopedLogger('LLMManager');
export class LLMManager {
private static _instance: LLMManager;
private _providers: Map<string, BaseProvider> = new Map();
@@ -40,22 +42,22 @@ export class LLMManager {
try {
this.registerProvider(provider);
} catch (error: any) {
console.log('Failed To Register Provider: ', provider.name, 'error:', error.message);
logger.warn('Failed To Register Provider: ', provider.name, 'error:', error.message);
}
}
}
} catch (error) {
console.error('Error registering providers:', error);
logger.error('Error registering providers:', error);
}
}

registerProvider(provider: BaseProvider) {
if (this._providers.has(provider.name)) {
console.warn(`Provider ${provider.name} is already registered. Skipping.`);
logger.warn(`Provider ${provider.name} is already registered. Skipping.`);
return;
}

console.log('Registering Provider: ', provider.name);
logger.info('Registering Provider: ', provider.name);
this._providers.set(provider.name, provider);
this._modelList = [...this._modelList, ...provider.staticModels];
}
@@ -93,12 +95,28 @@ export class LLMManager {
(provider): provider is BaseProvider & Required<Pick<ProviderInfo, 'getDynamicModels'>> =>
!!provider.getDynamicModels,
)
.map((provider) =>
provider.getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv).catch((err) => {
console.error(`Error getting dynamic models ${provider.name} :`, err);
return [];
}),
),
.map(async (provider) => {
const cachedModels = provider.getModelsFromCache(options);

if (cachedModels) {
return cachedModels;
}

const dynamicModels = await provider
.getDynamicModels(apiKeys, providerSettings?.[provider.name], serverEnv)
.then((models) => {
logger.info(`Caching ${models.length} dynamic models for ${provider.name}`);
provider.storeDynamicModels(options, models);

return models;
})
.catch((err) => {
logger.error(`Error getting dynamic models ${provider.name} :`, err);
return [];
});

return dynamicModels;
}),
);

// Combine static and dynamic models
@@ -110,6 +128,68 @@ export class LLMManager {

return modelList;
}
getStaticModelList() {
return [...this._providers.values()].flatMap((p) => p.staticModels || []);
}
async getModelListFromProvider(
providerArg: BaseProvider,
options: {
apiKeys?: Record<string, string>;
providerSettings?: Record<string, IProviderSetting>;
serverEnv?: Record<string, string>;
},
): Promise<ModelInfo[]> {
const provider = this._providers.get(providerArg.name);

if (!provider) {
throw new Error(`Provider ${providerArg.name} not found`);
}

const staticModels = provider.staticModels || [];

if (!provider.getDynamicModels) {
return staticModels;
}

const { apiKeys, providerSettings, serverEnv } = options;

const cachedModels = provider.getModelsFromCache({
apiKeys,
providerSettings,
serverEnv,
});

if (cachedModels) {
logger.info(`Found ${cachedModels.length} cached models for ${provider.name}`);
return [...cachedModels, ...staticModels];
}

logger.info(`Getting dynamic models for ${provider.name}`);

const dynamicModels = await provider
.getDynamicModels?.(apiKeys, providerSettings?.[provider.name], serverEnv)
.then((models) => {
logger.info(`Got ${models.length} dynamic models for ${provider.name}`);
provider.storeDynamicModels(options, models);

return models;
})
.catch((err) => {
logger.error(`Error getting dynamic models ${provider.name} :`, err);
return [];
});

return [...dynamicModels, ...staticModels];
}
getStaticModelListFromProvider(providerArg: BaseProvider) {
const provider = this._providers.get(providerArg.name);

if (!provider) {
throw new Error(`Provider ${providerArg.name} not found`);
}

return [...(provider.staticModels || [])];
}

getDefaultProvider(): BaseProvider {
const firstProvider = this._providers.values().next().value;
@@ -25,6 +25,30 @@ export default class HuggingFaceProvider extends BaseProvider {
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: 'codellama/CodeLlama-34b-Instruct-hf',
label: 'CodeLlama-34b-Instruct (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: 'NousResearch/Hermes-3-Llama-3.1-8B',
label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: 'Qwen/Qwen2.5-72B-Instruct',
label: 'Qwen2.5-72B-Instruct (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: 'meta-llama/Llama-3.1-70B-Instruct',
label: 'Llama-3.1-70B-Instruct (HuggingFace)',
@@ -37,6 +61,24 @@ export default class HuggingFaceProvider extends BaseProvider {
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: '01-ai/Yi-1.5-34B-Chat',
label: 'Yi-1.5-34B-Chat (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: 'codellama/CodeLlama-34b-Instruct-hf',
label: 'CodeLlama-34b-Instruct (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
{
name: 'NousResearch/Hermes-3-Llama-3.1-8B',
label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
provider: 'HuggingFace',
maxTokenAllowed: 8000,
},
];

getModelInstance(options: {
@@ -50,40 +50,35 @@ export default class HyperbolicProvider extends BaseProvider {
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'HYPERBOLIC_API_KEY',
});
const baseUrl = fetchBaseUrl || 'https://api.hyperbolic.xyz/v1';
const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: '',
defaultApiTokenKey: 'HYPERBOLIC_API_KEY',
});
const baseUrl = fetchBaseUrl || 'https://api.hyperbolic.xyz/v1';

if (!baseUrl || !apiKey) {
return [];
}

const response = await fetch(`${baseUrl}/models`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
});

const res = (await response.json()) as any;

const data = res.data.filter((model: any) => model.object === 'model' && model.supports_chat);

return data.map((m: any) => ({
name: m.id,
label: `${m.id} - context ${m.context_length ? Math.floor(m.context_length / 1000) + 'k' : 'N/A'}`,
provider: this.name,
maxTokenAllowed: m.context_length || 8000,
}));
} catch (error: any) {
console.error('Error getting Hyperbolic models:', error.message);
return [];
if (!apiKey) {
throw `Missing Api Key configuration for ${this.name} provider`;
}

const response = await fetch(`${baseUrl}/models`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
});

const res = (await response.json()) as any;

const data = res.data.filter((model: any) => model.object === 'model' && model.supports_chat);

return data.map((m: any) => ({
name: m.id,
label: `${m.id} - context ${m.context_length ? Math.floor(m.context_length / 1000) + 'k' : 'N/A'}`,
provider: this.name,
maxTokenAllowed: m.context_length || 8000,
}));
}

getModelInstance(options: {
@@ -103,8 +98,7 @@ export default class HyperbolicProvider extends BaseProvider {
});

if (!apiKey) {
console.log(`Missing configuration for ${this.name} provider`);
throw new Error(`Missing configuration for ${this.name} provider`);
throw `Missing Api Key configuration for ${this.name} provider`;
}

const openai = createOpenAI({
@@ -22,33 +22,27 @@ export default class LMStudioProvider extends BaseProvider {
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
defaultApiTokenKey: '',
});

if (!baseUrl) {
return [];
}

const response = await fetch(`${baseUrl}/v1/models`);
const data = (await response.json()) as { data: Array<{ id: string }> };

return data.data.map((model) => ({
name: model.id,
label: model.id,
provider: this.name,
maxTokenAllowed: 8000,
}));
} catch (error: any) {
console.log('Error getting LMStudio models:', error.message);
const { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'LMSTUDIO_API_BASE_URL',
defaultApiTokenKey: '',
});

if (!baseUrl) {
return [];
}

const response = await fetch(`${baseUrl}/v1/models`);
const data = (await response.json()) as { data: Array<{ id: string }> };

return data.data.map((model) => ({
name: model.id,
label: model.id,
provider: this.name,
maxTokenAllowed: 8000,
}));
}
getModelInstance: (options: {
model: string;
@@ -45,34 +45,29 @@ export default class OllamaProvider extends BaseProvider {
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
defaultApiTokenKey: '',
});
const { baseUrl } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'OLLAMA_API_BASE_URL',
defaultApiTokenKey: '',
});

if (!baseUrl) {
return [];
}

const response = await fetch(`${baseUrl}/api/tags`);
const data = (await response.json()) as OllamaApiResponse;

// console.log({ ollamamodels: data.models });

return data.models.map((model: OllamaModel) => ({
name: model.name,
label: `${model.name} (${model.details.parameter_size})`,
provider: this.name,
maxTokenAllowed: 8000,
}));
} catch (e) {
console.error('Failed to get Ollama models:', e);
if (!baseUrl) {
return [];
}

const response = await fetch(`${baseUrl}/api/tags`);
const data = (await response.json()) as OllamaApiResponse;

// console.log({ ollamamodels: data.models });

return data.models.map((model: OllamaModel) => ({
name: model.name,
label: `${model.name} (${model.details.parameter_size})`,
provider: this.name,
maxTokenAllowed: 8000,
}));
}
getModelInstance: (options: {
model: string;
@@ -27,7 +27,6 @@ export default class OpenRouterProvider extends BaseProvider {
};

staticModels: ModelInfo[] = [
{ name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
{
name: 'anthropic/claude-3.5-sonnet',
label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
@@ -19,37 +19,32 @@ export default class OpenAILikeProvider extends BaseProvider {
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
});
const { baseUrl, apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'OPENAI_LIKE_API_BASE_URL',
defaultApiTokenKey: 'OPENAI_LIKE_API_KEY',
});

if (!baseUrl || !apiKey) {
return [];
}

const response = await fetch(`${baseUrl}/models`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
});

const res = (await response.json()) as any;

return res.data.map((model: any) => ({
name: model.id,
label: model.id,
provider: this.name,
maxTokenAllowed: 8000,
}));
} catch (error) {
console.error('Error getting OpenAILike models:', error);
if (!baseUrl || !apiKey) {
return [];
}

const response = await fetch(`${baseUrl}/models`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
});

const res = (await response.json()) as any;

return res.data.map((model: any) => ({
name: model.id,
label: model.id,
provider: this.name,
maxTokenAllowed: 8000,
}));
}

getModelInstance(options: {
@@ -13,6 +13,7 @@ export default class OpenAIProvider extends BaseProvider {
};

staticModels: ModelInfo[] = [
{ name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
{ name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
{ name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
{ name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
@@ -38,41 +38,36 @@ export default class TogetherProvider extends BaseProvider {
settings?: IProviderSetting,
serverEnv: Record<string, string> = {},
): Promise<ModelInfo[]> {
try {
const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
defaultApiTokenKey: 'TOGETHER_API_KEY',
});
const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';
const { baseUrl: fetchBaseUrl, apiKey } = this.getProviderBaseUrlAndKey({
apiKeys,
providerSettings: settings,
serverEnv,
defaultBaseUrlKey: 'TOGETHER_API_BASE_URL',
defaultApiTokenKey: 'TOGETHER_API_KEY',
});
const baseUrl = fetchBaseUrl || 'https://api.together.xyz/v1';

if (!baseUrl || !apiKey) {
return [];
}

// console.log({ baseUrl, apiKey });

const response = await fetch(`${baseUrl}/models`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
});

const res = (await response.json()) as any;
const data = (res || []).filter((model: any) => model.type === 'chat');

return data.map((m: any) => ({
name: m.id,
label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
provider: this.name,
maxTokenAllowed: 8000,
}));
} catch (error: any) {
console.error('Error getting Together models:', error.message);
if (!baseUrl || !apiKey) {
return [];
}

// console.log({ baseUrl, apiKey });

const response = await fetch(`${baseUrl}/models`, {
headers: {
Authorization: `Bearer ${apiKey}`,
},
});

const res = (await response.json()) as any;
const data = (res || []).filter((model: any) => model.type === 'chat');

return data.map((m: any) => ({
name: m.id,
label: `${m.display_name} - in:$${m.pricing.input.toFixed(2)} out:$${m.pricing.output.toFixed(2)} - context ${Math.floor(m.context_length / 1000)}k`,
provider: this.name,
maxTokenAllowed: 8000,
}));
}

getModelInstance(options: {
@@ -55,7 +55,8 @@ interface MessageState {
function cleanoutMarkdownSyntax(content: string) {
const codeBlockRegex = /^\s*```\w*\n([\s\S]*?)\n\s*```\s*$/;
const match = content.match(codeBlockRegex);
console.log('matching', !!match, content);

// console.log('matching', !!match, content);

if (match) {
return match[1]; // Remove common leading 4-space indent
@@ -54,5 +54,5 @@ export const promptStore = atom<string>('default');

export const latestBranchStore = atom(false);

export const autoSelectStarterTemplate = atom(true);
export const autoSelectStarterTemplate = atom(false);
export const enableContextOptimizationStore = atom(false);
@@ -5,11 +5,14 @@ import { CONTINUE_PROMPT } from '~/lib/common/prompts/prompts';
import { streamText, type Messages, type StreamingOptions } from '~/lib/.server/llm/stream-text';
import SwitchableStream from '~/lib/.server/llm/switchable-stream';
import type { IProviderSetting } from '~/types/model';
import { createScopedLogger } from '~/utils/logger';

export async function action(args: ActionFunctionArgs) {
return chatAction(args);
}

const logger = createScopedLogger('api.chat');

function parseCookies(cookieHeader: string): Record<string, string> {
const cookies: Record<string, string> = {};
@@ -54,7 +57,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
const options: StreamingOptions = {
toolChoice: 'none',
onFinish: async ({ text: content, finishReason, usage }) => {
console.log('usage', usage);
logger.debug('usage', JSON.stringify(usage));

if (usage) {
cumulativeUsage.completionTokens += usage.completionTokens || 0;
@@ -63,23 +66,33 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
}

if (finishReason !== 'length') {
return stream
.switchSource(
createDataStream({
async execute(dataStream) {
dataStream.writeMessageAnnotation({
type: 'usage',
value: {
completionTokens: cumulativeUsage.completionTokens,
promptTokens: cumulativeUsage.promptTokens,
totalTokens: cumulativeUsage.totalTokens,
},
});
const encoder = new TextEncoder();
const usageStream = createDataStream({
async execute(dataStream) {
dataStream.writeMessageAnnotation({
type: 'usage',
value: {
completionTokens: cumulativeUsage.completionTokens,
promptTokens: cumulativeUsage.promptTokens,
totalTokens: cumulativeUsage.totalTokens,
},
onError: (error: any) => `Custom error: ${error.message}`,
}),
)
.then(() => stream.close());
});
},
onError: (error: any) => `Custom error: ${error.message}`,
}).pipeThrough(
new TransformStream({
transform: (chunk, controller) => {
// Convert the string stream to a byte stream
const str = typeof chunk === 'string' ? chunk : JSON.stringify(chunk);
controller.enqueue(encoder.encode(str));
},
}),
);
await stream.switchSource(usageStream);
await new Promise((resolve) => setTimeout(resolve, 0));
stream.close();

return;
}

if (stream.switches >= MAX_RESPONSE_SEGMENTS) {
@@ -88,7 +101,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {

const switchesLeft = MAX_RESPONSE_SEGMENTS - stream.switches;

console.log(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);
logger.info(`Reached max token limit (${MAX_TOKENS}): Continuing message (${switchesLeft} switches left)`);

messages.push({ role: 'assistant', content });
messages.push({ role: 'user', content: CONTINUE_PROMPT });
@@ -104,7 +117,9 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
contextOptimization,
});

return stream.switchSource(result.toDataStream());
stream.switchSource(result.toDataStream());

return;
},
};
@@ -128,7 +143,7 @@ async function chatAction({ context, request }: ActionFunctionArgs) {
},
});
} catch (error: any) {
console.error(error);
logger.error(error);

if (error.message?.includes('API key')) {
throw new Response('Invalid or missing API key', {
@@ -19,312 +19,6 @@ export const DEFAULT_PROVIDER = llmManager.getDefaultProvider();

let MODEL_LIST = llmManager.getModelList();

/*
*const PROVIDER_LIST_OLD: ProviderInfo[] = [
* {
* name: 'Anthropic',
* staticModels: [
* {
* name: 'claude-3-5-sonnet-latest',
* label: 'Claude 3.5 Sonnet (new)',
* provider: 'Anthropic',
* maxTokenAllowed: 8000,
* },
* {
* name: 'claude-3-5-sonnet-20240620',
* label: 'Claude 3.5 Sonnet (old)',
* provider: 'Anthropic',
* maxTokenAllowed: 8000,
* },
* {
* name: 'claude-3-5-haiku-latest',
* label: 'Claude 3.5 Haiku (new)',
* provider: 'Anthropic',
* maxTokenAllowed: 8000,
* },
* { name: 'claude-3-opus-latest', label: 'Claude 3 Opus', provider: 'Anthropic', maxTokenAllowed: 8000 },
* { name: 'claude-3-sonnet-20240229', label: 'Claude 3 Sonnet', provider: 'Anthropic', maxTokenAllowed: 8000 },
* { name: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku', provider: 'Anthropic', maxTokenAllowed: 8000 },
* ],
* getApiKeyLink: 'https://console.anthropic.com/settings/keys',
* },
* {
* name: 'Ollama',
* staticModels: [],
* getDynamicModels: getOllamaModels,
* getApiKeyLink: 'https://ollama.com/download',
* labelForGetApiKey: 'Download Ollama',
* icon: 'i-ph:cloud-arrow-down',
* },
* {
* name: 'OpenAILike',
* staticModels: [],
* getDynamicModels: getOpenAILikeModels,
* },
* {
* name: 'Cohere',
* staticModels: [
* { name: 'command-r-plus-08-2024', label: 'Command R plus Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-r-08-2024', label: 'Command R Latest', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-r-plus', label: 'Command R plus', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-r', label: 'Command R', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command', label: 'Command', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-nightly', label: 'Command Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-light', label: 'Command Light', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'command-light-nightly', label: 'Command Light Nightly', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'c4ai-aya-expanse-8b', label: 'c4AI Aya Expanse 8b', provider: 'Cohere', maxTokenAllowed: 4096 },
* { name: 'c4ai-aya-expanse-32b', label: 'c4AI Aya Expanse 32b', provider: 'Cohere', maxTokenAllowed: 4096 },
* ],
* getApiKeyLink: 'https://dashboard.cohere.com/api-keys',
* },
* {
* name: 'OpenRouter',
* staticModels: [
* { name: 'gpt-4o', label: 'GPT-4o', provider: 'OpenAI', maxTokenAllowed: 8000 },
* {
* name: 'anthropic/claude-3.5-sonnet',
* label: 'Anthropic: Claude 3.5 Sonnet (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* {
* name: 'anthropic/claude-3-haiku',
* label: 'Anthropic: Claude 3 Haiku (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* {
* name: 'deepseek/deepseek-coder',
* label: 'Deepseek-Coder V2 236B (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* {
* name: 'google/gemini-flash-1.5',
* label: 'Google Gemini Flash 1.5 (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* {
* name: 'google/gemini-pro-1.5',
* label: 'Google Gemini Pro 1.5 (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* { name: 'x-ai/grok-beta', label: 'xAI Grok Beta (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 8000 },
* {
* name: 'mistralai/mistral-nemo',
* label: 'OpenRouter Mistral Nemo (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* {
* name: 'qwen/qwen-110b-chat',
* label: 'OpenRouter Qwen 110b Chat (OpenRouter)',
* provider: 'OpenRouter',
* maxTokenAllowed: 8000,
* },
* { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter', maxTokenAllowed: 4096 },
* ],
* getDynamicModels: getOpenRouterModels,
* getApiKeyLink: 'https://openrouter.ai/settings/keys',
* },
* {
* name: 'Google',
* staticModels: [
* { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-1.5-pro-002', label: 'Gemini 1.5 Pro-002', provider: 'Google', maxTokenAllowed: 8192 },
* { name: 'gemini-exp-1206', label: 'Gemini exp-1206', provider: 'Google', maxTokenAllowed: 8192 },
* ],
* getApiKeyLink: 'https://aistudio.google.com/app/apikey',
* },
* {
* name: 'Groq',
* staticModels: [
* { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* { name: 'llama-3.2-90b-vision-preview', label: 'Llama 3.2 90b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* { name: 'llama-3.2-3b-preview', label: 'Llama 3.2 3b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* { name: 'llama-3.2-1b-preview', label: 'Llama 3.2 1b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* { name: 'llama-3.3-70b-versatile', label: 'Llama 3.3 70b (Groq)', provider: 'Groq', maxTokenAllowed: 8000 },
* ],
* getApiKeyLink: 'https://console.groq.com/keys',
* },
* {
* name: 'HuggingFace',
* staticModels: [
* {
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
* label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: '01-ai/Yi-1.5-34B-Chat',
* label: 'Yi-1.5-34B-Chat (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'codellama/CodeLlama-34b-Instruct-hf',
* label: 'CodeLlama-34b-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'NousResearch/Hermes-3-Llama-3.1-8B',
* label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
* label: 'Qwen2.5-Coder-32B-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'Qwen/Qwen2.5-72B-Instruct',
* label: 'Qwen2.5-72B-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'meta-llama/Llama-3.1-70B-Instruct',
* label: 'Llama-3.1-70B-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'meta-llama/Llama-3.1-405B',
* label: 'Llama-3.1-405B (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: '01-ai/Yi-1.5-34B-Chat',
* label: 'Yi-1.5-34B-Chat (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'codellama/CodeLlama-34b-Instruct-hf',
* label: 'CodeLlama-34b-Instruct (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* {
* name: 'NousResearch/Hermes-3-Llama-3.1-8B',
* label: 'Hermes-3-Llama-3.1-8B (HuggingFace)',
* provider: 'HuggingFace',
* maxTokenAllowed: 8000,
* },
* ],
* getApiKeyLink: 'https://huggingface.co/settings/tokens',
* },
* {
* name: 'OpenAI',
* staticModels: [
* { name: 'gpt-4o-mini', label: 'GPT-4o Mini', provider: 'OpenAI', maxTokenAllowed: 8000 },
* { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
* { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI', maxTokenAllowed: 8000 },
* { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI', maxTokenAllowed: 8000 },
* ],
* getApiKeyLink: 'https://platform.openai.com/api-keys',
* },
* {
* name: 'xAI',
* staticModels: [{ name: 'grok-beta', label: 'xAI Grok Beta', provider: 'xAI', maxTokenAllowed: 8000 }],
* getApiKeyLink: 'https://docs.x.ai/docs/quickstart#creating-an-api-key',
* },
* {
* name: 'Deepseek',
* staticModels: [
* { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek', maxTokenAllowed: 8000 },
* { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek', maxTokenAllowed: 8000 },
* ],
* getApiKeyLink: 'https://platform.deepseek.com/apiKeys',
* },
* {
* name: 'Mistral',
* staticModels: [
* { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'open-codestral-mamba', label: 'Codestral Mamba', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'open-mistral-nemo', label: 'Mistral Nemo', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'ministral-8b-latest', label: 'Mistral 8B', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'mistral-small-latest', label: 'Mistral Small', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'codestral-latest', label: 'Codestral', provider: 'Mistral', maxTokenAllowed: 8000 },
* { name: 'mistral-large-latest', label: 'Mistral Large Latest', provider: 'Mistral', maxTokenAllowed: 8000 },
* ],
* getApiKeyLink: 'https://console.mistral.ai/api-keys/',
* },
* {
* name: 'LMStudio',
* staticModels: [],
* getDynamicModels: getLMStudioModels,
* getApiKeyLink: 'https://lmstudio.ai/',
* labelForGetApiKey: 'Get LMStudio',
* icon: 'i-ph:cloud-arrow-down',
* },
* {
* name: 'Together',
* getDynamicModels: getTogetherModels,
* staticModels: [
* {
* name: 'Qwen/Qwen2.5-Coder-32B-Instruct',
* label: 'Qwen/Qwen2.5-Coder-32B-Instruct',
* provider: 'Together',
* maxTokenAllowed: 8000,
* },
* {
* name: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
* label: 'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo',
* provider: 'Together',
* maxTokenAllowed: 8000,
* },
*
* {
* name: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
* label: 'Mixtral 8x7B Instruct',
* provider: 'Together',
* maxTokenAllowed: 8192,
* },
* ],
* getApiKeyLink: 'https://api.together.xyz/settings/api-keys',
* },
* {
* name: 'Perplexity',
* staticModels: [
* {
* name: 'llama-3.1-sonar-small-128k-online',
* label: 'Sonar Small Online',
* provider: 'Perplexity',
* maxTokenAllowed: 8192,
* },
* {
* name: 'llama-3.1-sonar-large-128k-online',
* label: 'Sonar Large Online',
* provider: 'Perplexity',
* maxTokenAllowed: 8192,
* },
* {
* name: 'llama-3.1-sonar-huge-128k-online',
* label: 'Sonar Huge Online',
* provider: 'Perplexity',
* maxTokenAllowed: 8192,
* },
* ],
* getApiKeyLink: 'https://www.perplexity.ai/settings/api',
* },
*];
*/

const providerBaseUrlEnvKeys: Record<string, { baseUrlKey?: string; apiTokenKey?: string }> = {};
PROVIDER_LIST.forEach((provider) => {
providerBaseUrlEnvKeys[provider.name] = {
@@ -1,4 +1,7 @@
export type DebugLevel = 'trace' | 'debug' | 'info' | 'warn' | 'error';
import { Chalk } from 'chalk';

const chalk = new Chalk({ level: 3 });

type LoggerFunction = (...messages: any[]) => void;
@@ -13,9 +16,6 @@ interface Logger {

let currentLevel: DebugLevel = (import.meta.env.VITE_LOG_LEVEL ?? import.meta.env.DEV) ? 'debug' : 'info';

const isWorker = 'HTMLRewriter' in globalThis;
const supportsColor = !isWorker;

export const logger: Logger = {
trace: (...messages: any[]) => log('trace', undefined, messages),
debug: (...messages: any[]) => log('debug', undefined, messages),
@@ -63,14 +63,8 @@ function log(level: DebugLevel, scope: string | undefined, messages: any[]) {
return `${acc} ${current}`;
}, '');

if (!supportsColor) {
console.log(`[${level.toUpperCase()}]`, allMessages);

return;
}

const labelBackgroundColor = getColorForLevel(level);
const labelTextColor = level === 'warn' ? 'black' : 'white';
const labelTextColor = level === 'warn' ? '#000000' : '#FFFFFF';

const labelStyles = getLabelStyles(labelBackgroundColor, labelTextColor);
const scopeStyles = getLabelStyles('#77828D', 'white');
@@ -81,7 +75,21 @@ function log(level: DebugLevel, scope: string | undefined, messages: any[]) {
styles.push('', scopeStyles);
}

console.log(`%c${level.toUpperCase()}${scope ? `%c %c${scope}` : ''}`, ...styles, allMessages);
let labelText = formatText(` ${level.toUpperCase()} `, labelTextColor, labelBackgroundColor);

if (scope) {
labelText = `${labelText} ${formatText(` ${scope} `, '#FFFFFF', '77828D')}`;
}

if (typeof window !== 'undefined') {
console.log(`%c${level.toUpperCase()}${scope ? `%c %c${scope}` : ''}`, ...styles, allMessages);
} else {
console.log(`${labelText}`, allMessages);
}
}

function formatText(text: string, color: string, bg: string) {
return chalk.bgHex(bg)(chalk.hex(color)(text));
}

function getLabelStyles(color: string, textColor: string) {
@@ -104,7 +112,7 @@ function getColorForLevel(level: DebugLevel): string {
return '#EE4744';
}
default: {
return 'black';
return '#000000';
}
}
}
@@ -27,7 +27,7 @@ ${templates
Response Format:
<selection>
<templateName>{selected template name}</templateName>
<reasoning>{brief explanation for the choice}</reasoning>
<title>{a proper title for the project}</title>
</selection>

Examples:
@@ -37,7 +37,7 @@ User: I need to build a todo app
Response:
<selection>
<templateName>react-basic-starter</templateName>
<reasoning>Simple React setup perfect for building a todo application</reasoning>
<title>Simple React todo application</title>
</selection>
</example>
@@ -46,7 +46,7 @@ User: Write a script to generate numbers from 1 to 100
Response:
<selection>
<templateName>blank</templateName>
<reasoning>This is a simple script that doesn't require any template setup</reasoning>
<title>script to generate numbers from 1 to 100</title>
</selection>
</example>
@@ -62,16 +62,17 @@ Important: Provide only the selection tags in your response, no additional text.

const templates: Template[] = STARTER_TEMPLATES.filter((t) => !t.name.includes('shadcn'));

const parseSelectedTemplate = (llmOutput: string): string | null => {
const parseSelectedTemplate = (llmOutput: string): { template: string; title: string } | null => {
try {
// Extract content between <templateName> tags
const templateNameMatch = llmOutput.match(/<templateName>(.*?)<\/templateName>/);
const titleMatch = llmOutput.match(/<title>(.*?)<\/title>/);

if (!templateNameMatch) {
return null;
}

return templateNameMatch[1].trim();
return { template: templateNameMatch[1].trim(), title: titleMatch?.[1].trim() || 'Untitled Project' };
} catch (error) {
console.error('Error parsing template selection:', error);
return null;
@@ -101,7 +102,10 @@ export const selectStarterTemplate = async (options: { message: string; model: s
} else {
console.log('No template selected, using blank template');

return 'blank';
return {
template: 'blank',
title: '',
};
}
};
@@ -181,7 +185,7 @@ const getGitHubRepoContent = async (
}
};

export async function getTemplates(templateName: string) {
export async function getTemplates(templateName: string, title?: string) {
const template = STARTER_TEMPLATES.find((t) => t.name == templateName);

if (!template) {
@@ -211,7 +215,7 @@ export async function getTemplates(templateName: string) {

const filesToImport = {
files: filteredFiles,
ignoreFile: filteredFiles,
ignoreFile: [] as typeof filteredFiles,
};

if (templateIgnoreFile) {
@@ -227,7 +231,7 @@ export async function getTemplates(templateName: string) {
}

const assistantMessage = `
<boltArtifact id="imported-files" title="Importing Starter Files" type="bundled">
<boltArtifact id="imported-files" title="${title || 'Importing Starter Files'}" type="bundled">
${filesToImport.files
.map(
(file) =>
@@ -278,10 +282,16 @@ Any attempt to modify these protected files will result in immediate termination
If you need to make changes to functionality, create new files instead of modifying the protected ones listed above.
---
`;
userMessage += `
}

userMessage += `
---
template import is done, and you can now use the imported files,
edit only the files that need to be changed, and you can create new files as needed.
NO NOT EDIT/WRITE ANY FILES THAT ALREADY EXIST IN THE PROJECT AND DOES NOT NEED TO BE MODIFIED
---
Now that the Template is imported please continue with my original request
`;
}

return {
assistantMessage,
@@ -74,6 +74,7 @@
"@xterm/addon-web-links": "^0.11.0",
"@xterm/xterm": "^5.5.0",
"ai": "^4.0.13",
"chalk": "^5.4.1",
"date-fns": "^3.6.0",
"diff": "^5.2.0",
"dotenv": "^16.4.7",
@@ -143,6 +143,9 @@ importers:
ai:
specifier: ^4.0.13
version: 4.0.18(react@18.3.1)(zod@3.23.8)
chalk:
specifier: ^5.4.1
version: 5.4.1
date-fns:
specifier: ^3.6.0
version: 3.6.0
@@ -2604,8 +2607,8 @@ packages:
resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
engines: {node: '>=10'}

chalk@5.3.0:
resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==}
chalk@5.4.1:
resolution: {integrity: sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==}
engines: {node: ^12.17.0 || ^14.13 || >=16.0.0}

character-entities-html4@2.1.0:
|
||||
ansi-styles: 4.3.0
|
||||
supports-color: 7.2.0
|
||||
|
||||
chalk@5.3.0: {}
|
||||
chalk@5.4.1: {}
|
||||
|
||||
character-entities-html4@2.1.0: {}
|
||||
|
||||
@ -9415,7 +9418,7 @@ snapshots:
|
||||
jsondiffpatch@0.6.0:
|
||||
dependencies:
|
||||
'@types/diff-match-patch': 1.0.36
|
||||
chalk: 5.3.0
|
||||
chalk: 5.4.1
|
||||
diff-match-patch: 1.0.5
|
||||
|
||||
jsonfile@6.1.0:
|
||||
|