import { LLMModel } from "../client/api";
import { isMacOS } from "../utils";
import { getClientConfig } from "../config/client";
import {
  DEFAULT_INPUT_TEMPLATE,
  DEFAULT_MODELS,
  DEFAULT_SIDEBAR_WIDTH,
  StoreKey,
} from "../constant";
import { createPersistStore } from "../utils/store";

export type ModelType = (typeof DEFAULT_MODELS)[number]["name"];

export enum SubmitKey {
  Enter = "Enter",
  CtrlEnter = "Ctrl + Enter",
  ShiftEnter = "Shift + Enter",
  AltEnter = "Alt + Enter",
  MetaEnter = "Meta + Enter",
}

export enum Theme {
  Auto = "auto",
  Dark = "dark",
  Light = "light",
}

export const DEFAULT_CONFIG = {
  lastUpdate: Date.now(), // timestamp, used to merge state

  submitKey: isMacOS() ? SubmitKey.MetaEnter : SubmitKey.CtrlEnter,
  avatar: "1f603",
  fontSize: 14,
  theme: Theme.Auto as Theme,
  tightBorder: !!getClientConfig()?.isApp,
  sendPreviewBubble: true,
  enableAutoGenerateTitle: true,
  sidebarWidth: DEFAULT_SIDEBAR_WIDTH,

  disablePromptHint: false,

  dontShowMaskSplashScreen: false, // don't show splash screen when creating a chat
  hideBuiltinMasks: false, // don't add builtin masks

  customModels: "",
  models: DEFAULT_MODELS as any as LLMModel[],

  modelConfig: {
    model: "gpt-3.5-turbo" as ModelType,
    temperature: 0.5,
    top_p: 1,
    max_tokens: 4000,
    presence_penalty: 0,
    frequency_penalty: 0,
    sendMemory: true,
    historyMessageCount: 4,
    compressMessageLengthThreshold: 1000,
    enableInjectSystemPrompts: true,
    template: DEFAULT_INPUT_TEMPLATE,
  },
};

export type ChatConfig = typeof DEFAULT_CONFIG;

export type ModelConfig = ChatConfig["modelConfig"];

// Clamp x to [min, max]; fall back to defaultValue when x is not a number.
export function limitNumber(
  x: number,
  min: number,
  max: number,
  defaultValue: number,
) {
  if (isNaN(x)) {
    return defaultValue;
  }

  return Math.min(max, Math.max(min, x));
}

export const ModalConfigValidator = {
  model(x: string) {
    return x as ModelType;
  },
  max_tokens(x: number) {
    return limitNumber(x, 0, 512000, 1024);
  },
  presence_penalty(x: number) {
    return limitNumber(x, -2, 2, 0);
  },
  frequency_penalty(x: number) {
    return limitNumber(x, -2, 2, 0);
  },
  temperature(x: number) {
    return limitNumber(x, 0, 1, 1);
  },
  top_p(x: number) {
    return limitNumber(x, 0, 1, 1);
  },
};

export const useAppConfig = createPersistStore(
  { ...DEFAULT_CONFIG },
  (set, get) => ({
    reset() {
      set(() => ({ ...DEFAULT_CONFIG }));
    },

    // Merge freshly fetched models into the stored list: models absent from
    // newModels are kept but marked unavailable; new entries take precedence.
    mergeModels(newModels: LLMModel[]) {
      if (!newModels || newModels.length === 0) {
        return;
      }

      const oldModels = get().models;
      const modelMap: Record<string, LLMModel> = {};

      for (const model of oldModels) {
        model.available = false;
        modelMap[model.name] = model;
      }

      for (const model of newModels) {
        model.available = true;
        modelMap[model.name] = model;
      }

      set(() => ({
        models: Object.values(modelMap),
      }));
    },

    allModels() {},
  }),
  {
    name: StoreKey.Config,
    version: 3.8,

    // Upgrade persisted state from older schema versions to the current one.
    migrate(persistedState, version) {
      const state = persistedState as ChatConfig;

      if (version < 3.4) {
        state.modelConfig.sendMemory = true;
        state.modelConfig.historyMessageCount = 4;
        state.modelConfig.compressMessageLengthThreshold = 1000;
        state.modelConfig.frequency_penalty = 0;
        state.modelConfig.top_p = 1;
        state.modelConfig.template = DEFAULT_INPUT_TEMPLATE;
        state.dontShowMaskSplashScreen = false;
        state.hideBuiltinMasks = false;
      }

      if (version < 3.5) {
        state.customModels = "claude,claude-100k";
      }

      if (version < 3.6) {
        state.modelConfig.enableInjectSystemPrompts = true;
      }

      if (version < 3.7) {
        state.enableAutoGenerateTitle = true;
      }

      if (version < 3.8) {
        state.lastUpdate = Date.now();
      }

      return state as any;
    },
  },
);