feat: store model configs in the database

Jun Siang Cheah 2024-05-19 18:46:24 +08:00
parent 1bacd5d93f
commit 4002ead6af
50 changed files with 434 additions and 194 deletions

View File

@ -18,8 +18,9 @@ import requests
from pydantic import BaseModel, ConfigDict
from typing import Optional, List
from apps.web.models.models import Models
from utils.utils import get_verified_user, get_current_user, get_admin_user
from config import SRC_LOG_LEVELS, ENV, MODEL_CONFIG
from config import SRC_LOG_LEVELS
from constants import MESSAGES
import os
@ -77,11 +78,12 @@ with open(LITELLM_CONFIG_DIR, "r") as file:
app.state.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER.value
app.state.MODEL_FILTER_LIST = MODEL_FILTER_LIST.value
app.state.MODEL_CONFIG = [
model.to_form() for model in Models.get_all_models_by_source("litellm")
]
app.state.ENABLE = ENABLE_LITELLM
app.state.CONFIG = litellm_config
app.state.MODEL_CONFIG = MODEL_CONFIG.value.get("litellm", [])
# Global variable to store the subprocess reference
background_process = None
@ -268,9 +270,9 @@ async def get_models(user=Depends(get_current_user)):
(
item
for item in app.state.MODEL_CONFIG
if item["name"] == model["model_name"]
if item.id == model["model_name"]
),
{},
None,
),
}
for model in app.state.CONFIG["model_list"]
@ -286,7 +288,7 @@ async def get_models(user=Depends(get_current_user)):
def add_custom_info_to_model(model: dict):
model["custom_info"] = next(
(item for item in app.state.MODEL_CONFIG if item["id"] == model["id"]), {}
(item for item in app.state.MODEL_CONFIG if item.id == model["id"]), None
)
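Because `app.state.MODEL_CONFIG` is now a list of Pydantic `ModelForm` objects loaded from the database rather than raw dicts, the lookup uses attribute access (`item.id`) and falls back to `None` instead of `{}`. A minimal, self-contained sketch of the pattern, with the `ModelForm` fields mirrored from `apps/web/models/models.py` and a made-up entry:

from typing import Optional
from pydantic import BaseModel

class ModelForm(BaseModel):
    # Mirrors the form added in apps/web/models/models.py
    id: str
    source: str
    base_model: Optional[str] = None
    name: str
    params: dict

# Hypothetical in-memory config, as returned by Models.get_all_models_by_source("litellm")
MODEL_CONFIG = [
    ModelForm(id="gpt-4", source="litellm", name="GPT-4", params={"vision_capable": True}),
]

def add_custom_info_to_model(model: dict) -> dict:
    # Attribute access replaces dict indexing; a missing entry is None, not {}
    model["custom_info"] = next(
        (item for item in MODEL_CONFIG if item.id == model["id"]), None
    )
    return model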

View File

@ -29,7 +29,7 @@ import time
from urllib.parse import urlparse
from typing import Optional, List, Union
from apps.web.models.models import Models
from apps.web.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import (
@ -46,7 +46,6 @@ from config import (
ENABLE_MODEL_FILTER,
MODEL_FILTER_LIST,
UPLOAD_DIR,
MODEL_CONFIG,
AppConfig,
)
from utils.misc import calculate_sha256
@ -67,7 +66,9 @@ app.state.config = AppConfig()
app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.MODEL_CONFIG = MODEL_CONFIG.value.get("ollama", [])
app.state.MODEL_CONFIG = [
model.to_form() for model in Models.get_all_models_by_source("ollama")
]
app.state.config.OLLAMA_BASE_URLS = OLLAMA_BASE_URLS
app.state.MODELS = {}
@ -179,7 +180,7 @@ async def get_all_models():
def add_custom_info_to_model(model: dict):
model["custom_info"] = next(
(item for item in app.state.MODEL_CONFIG if item["id"] == model["model"]), {}
(item for item in app.state.MODEL_CONFIG if item.id == model["model"]), None
)

View File

@ -10,7 +10,7 @@ import logging
from pydantic import BaseModel
from apps.web.models.models import Models
from apps.web.models.users import Users
from constants import ERROR_MESSAGES
from utils.utils import (
@ -27,7 +27,6 @@ from config import (
CACHE_DIR,
ENABLE_MODEL_FILTER,
MODEL_FILTER_LIST,
MODEL_CONFIG,
AppConfig,
)
from typing import List, Optional
@ -53,7 +52,9 @@ app.state.config = AppConfig()
app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.MODEL_CONFIG = MODEL_CONFIG.value.get("openai", [])
app.state.MODEL_CONFIG = [
model.to_form() for model in Models.get_all_models_by_source("openai")
]
app.state.config.ENABLE_OPENAI_API = ENABLE_OPENAI_API
@ -262,7 +263,7 @@ async def get_all_models():
def add_custom_info_to_model(model: dict):
model["custom_info"] = next(
(item for item in app.state.MODEL_CONFIG if item["id"] == model["id"]), {}
(item for item in app.state.MODEL_CONFIG if item.id == model["id"]), None
)

View File

@ -1,4 +1,4 @@
"""Peewee migrations -- 002_add_local_sharing.py.
"""Peewee migrations -- 008_add_models.py.
Some examples (model - class or model name)::
@ -37,43 +37,24 @@ with suppress(ImportError):
def migrate(migrator: Migrator, database: pw.Database, *, fake=False):
"""Write your migrations here."""
# Adding fields created_at and updated_at to the 'user' table
migrator.add_fields(
"user",
created_at=pw.BigIntegerField(null=True), # Allow null for transition
updated_at=pw.BigIntegerField(null=True), # Allow null for transition
last_active_at=pw.BigIntegerField(null=True), # Allow null for transition
)
@migrator.create_model
class Model(pw.Model):
id = pw.TextField()
source = pw.TextField()
base_model = pw.TextField(null=True)
name = pw.TextField()
params = pw.TextField()
# Populate the new fields from an existing 'timestamp' field
migrator.sql(
'UPDATE "user" SET created_at = timestamp, updated_at = timestamp, last_active_at = timestamp WHERE timestamp IS NOT NULL'
)
class Meta:
table_name = "model"
# Now that the data has been copied, remove the original 'timestamp' field
migrator.remove_fields("user", "timestamp")
# Update the fields to be not null now that they are populated
migrator.change_fields(
"user",
created_at=pw.BigIntegerField(null=False),
updated_at=pw.BigIntegerField(null=False),
last_active_at=pw.BigIntegerField(null=False),
indexes = (
# Create a unique index on the id, source columns
(("id", "source"), True),
)
def rollback(migrator: Migrator, database: pw.Database, *, fake=False):
"""Write your rollback migrations here."""
# Recreate the timestamp field initially allowing null values for safe transition
migrator.add_fields("user", timestamp=pw.BigIntegerField(null=True))
# Copy the earliest created_at date back into the new timestamp field
# This assumes created_at was originally a copy of timestamp
migrator.sql('UPDATE "user" SET timestamp = created_at')
# Remove the created_at and updated_at fields
migrator.remove_fields("user", "created_at", "updated_at", "last_active_at")
# Finally, alter the timestamp field to not allow nulls if that was the original setting
migrator.change_fields("user", timestamp=pw.BigIntegerField(null=False))
migrator.remove_model("model")
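For context, these numbered migration modules are applied by peewee_migrate's `Router`; a minimal sketch of how that runner is typically invoked (the database handle and migrations directory below are assumptions, not part of this commit):

import peewee as pw
from peewee_migrate import Router

# Assumed SQLite database and migrations directory, for illustration only.
db = pw.SqliteDatabase("webui.db")
router = Router(db, migrate_dir="apps/web/internal/migrations")

# Applies any unapplied migrations in order, including 008_add_models.
router.run()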

View File

@ -0,0 +1,157 @@
import json
from typing import Optional
import peewee as pw
from playhouse.shortcuts import model_to_dict
from pydantic import BaseModel
from apps.web.internal.db import DB
####################
# Models DB Schema
####################
# ModelParams is a model for the data stored in the params field of the Model table
# It isn't currently used in the backend, but it's here as a reference
class ModelParams(BaseModel):
"""
A Pydantic model that represents the parameters of a model.
Attributes:
description (str): A description of the model.
vision_capable (bool): A flag indicating whether the model supports vision, i.e. accepts image inputs.
"""
description: str
vision_capable: bool
class Model(pw.Model):
id = pw.TextField()
"""
The model's id as used in the API. If it matches the id of an existing model, this entry overrides that model.
"""
source = pw.TextField()
"""
The source of the model, e.g., ollama, openai, or litellm.
"""
base_model = pw.TextField(null=True)
"""
An optional pointer to the actual model that should be used when proxying requests.
Currently unused, but will be used to support Modelfile-like behaviour in the future.
"""
name = pw.TextField()
"""
The human-readable display name of the model.
"""
params = pw.TextField()
"""
Holds a JSON-encoded blob of parameters; see `ModelParams`.
"""
class Meta:
database = DB
indexes = (
# Create a unique index on the id, source columns
(("id", "source"), True),
)
class ModelModel(BaseModel):
id: str
source: str
base_model: Optional[str] = None
name: str
params: str
def to_form(self) -> "ModelForm":
return ModelForm(**{**self.model_dump(), "params": json.loads(self.params)})
####################
# Forms
####################
class ModelForm(BaseModel):
id: str
source: str
base_model: Optional[str] = None
name: str
params: dict
def to_db_model(self) -> ModelModel:
return ModelModel(**{**self.model_dump(), "params": json.dumps(self.params)})
class ModelsTable:
def __init__(
self,
db: pw.SqliteDatabase | pw.PostgresqlDatabase,
):
self.db = db
self.db.create_tables([Model])
def get_all_models(self) -> list[ModelModel]:
return [ModelModel(**model_to_dict(model)) for model in Model.select()]
def get_all_models_by_source(self, source: str) -> list[ModelModel]:
return [
ModelModel(**model_to_dict(model))
for model in Model.select().where(Model.source == source)
]
def update_all_models(self, models: list[ModelForm]) -> bool:
try:
with self.db.atomic():
# Fetch current models from the database
current_models = self.get_all_models()
current_model_dict = {
(model.id, model.source): model for model in current_models
}
# Create a set of model IDs and sources from the current models and the new models
current_model_keys = set(current_model_dict.keys())
new_model_keys = set((model.id, model.source) for model in models)
# Determine which models need to be created, updated, or deleted
models_to_create = [
model
for model in models
if (model.id, model.source) not in current_model_keys
]
models_to_update = [
model
for model in models
if (model.id, model.source) in current_model_keys
]
models_to_delete = current_model_keys - new_model_keys
# Perform the necessary database operations
for model in models_to_create:
Model.create(**model.to_db_model().model_dump())
for model in models_to_update:
Model.update(**model.to_db_model().model_dump()).where(
(Model.id == model.id) & (Model.source == model.source)
).execute()
for model_id, model_source in models_to_delete:
Model.delete().where(
(Model.id == model_id) & (Model.source == model_source)
).execute()
return True
except Exception as e:
return False
Models = ModelsTable(DB)
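A brief usage sketch of the new table wrapper; the model entry below is made up for illustration, and `Models` is the module-level instance created above:

# Hypothetical entry, for illustration only.
form = ModelForm(
    id="llama3:latest",
    source="ollama",
    name="Llama 3",
    params={"description": "General-purpose chat model", "vision_capable": False},
)

# Round-trip between the API form (dict params) and the DB row (JSON-encoded params).
db_model = form.to_db_model()  # db_model.params is now a JSON string

# Replace the stored config in one transaction; rows missing from the list are deleted.
Models.update_all_models([form])

# Read back per-source rows; to_form() decodes params for API responses.
ollama_models = [m.to_form() for m in Models.get_all_models_by_source("ollama")]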

View File

@ -549,10 +549,6 @@ WEBHOOK_URL = PersistentConfig(
ENABLE_ADMIN_EXPORT = os.environ.get("ENABLE_ADMIN_EXPORT", "True").lower() == "true"
MODEL_CONFIG = PersistentConfig(
"CONFIG_DATA", "models", {"ollama": [], "litellm": [], "openai": []}
)
####################################
# WEBUI_SECRET_KEY
####################################

View File

@ -35,9 +35,9 @@ from apps.web.main import app as webui_app
import asyncio
from pydantic import BaseModel
from typing import List
from typing import List, Optional
from apps.web.models.models import Models, ModelModel, ModelForm
from utils.utils import get_admin_user
from apps.rag.utils import rag_messages
@ -59,7 +59,6 @@ from config import (
SRC_LOG_LEVELS,
WEBHOOK_URL,
ENABLE_ADMIN_EXPORT,
MODEL_CONFIG,
AppConfig,
)
from constants import ERROR_MESSAGES
@ -113,7 +112,7 @@ app.state.config = AppConfig()
app.state.config.ENABLE_MODEL_FILTER = ENABLE_MODEL_FILTER
app.state.config.MODEL_FILTER_LIST = MODEL_FILTER_LIST
app.state.config.MODEL_CONFIG = MODEL_CONFIG
app.state.MODEL_CONFIG = [model.to_form() for model in Models.get_all_models()]
app.state.config.WEBHOOK_URL = WEBHOOK_URL
@ -310,43 +309,40 @@ async def update_model_filter_config(
}
class ModelConfig(BaseModel):
id: str
name: str
description: str
vision_capable: bool
class SetModelConfigForm(BaseModel):
ollama: List[ModelConfig]
litellm: List[ModelConfig]
openai: List[ModelConfig]
models: List[ModelForm]
@app.post("/api/config/models")
async def update_model_config(
form_data: SetModelConfigForm, user=Depends(get_admin_user)
):
data = form_data.model_dump()
if not Models.update_all_models(form_data.models):
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=ERROR_MESSAGES.DEFAULT("Failed to update model config"),
)
ollama_app.state.MODEL_CONFIG = data.get("ollama", [])
ollama_app.state.MODEL_CONFIG = [
model for model in form_data.models if model.source == "ollama"
]
openai_app.state.MODEL_CONFIG = data.get("openai", [])
openai_app.state.MODEL_CONFIG = [
model for model in form_data.models if model.source == "openai"
]
litellm_app.state.MODEL_CONFIG = data.get("litellm", [])
litellm_app.state.MODEL_CONFIG = [
model for model in form_data.models if model.source == "litellm"
]
app.state.config.MODEL_CONFIG = {
"ollama": ollama_app.state.MODEL_CONFIG,
"openai": openai_app.state.MODEL_CONFIG,
"litellm": litellm_app.state.MODEL_CONFIG,
}
app.state.MODEL_CONFIG = [model for model in form_data.models]
return {"models": app.state.config.MODEL_CONFIG}
return {"models": app.state.MODEL_CONFIG}
@app.get("/api/config/models")
async def get_model_config(user=Depends(get_admin_user)):
return {"models": app.state.config.MODEL_CONFIG}
return {"models": app.state.MODEL_CONFIG}
@app.get("/api/webhook")
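A hedged sketch of the reshaped endpoints: `POST /api/config/models` now accepts a flat list of model forms (each carrying its `source`), and both endpoints return `{"models": [...]}`. The base URL and admin token below are placeholders:

import requests

BASE_URL = "http://localhost:8080"  # placeholder
HEADERS = {"Authorization": "Bearer <admin-token>"}  # placeholder

payload = {
    "models": [
        {
            "id": "llama3:latest",
            "source": "ollama",
            "name": "Llama 3",
            "params": {"description": "General-purpose chat model", "vision_capable": False},
        }
    ]
}

resp = requests.post(f"{BASE_URL}/api/config/models", json=payload, headers=HEADERS)
resp.raise_for_status()
print(resp.json())  # {"models": [...]}, no longer grouped by backend

current = requests.get(f"{BASE_URL}/api/config/models", headers=HEADERS).json()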

View File

@ -226,16 +226,18 @@ export const getModelConfig = async (token: string): Promise<GlobalModelConfig>
export interface ModelConfig {
id: string;
name?: string;
name: string;
source: string;
base_model?: string;
params: ModelParams;
}
export interface ModelParams {
description?: string;
vision_capable?: boolean;
}
export interface GlobalModelConfig {
ollama: ModelConfig[];
litellm: ModelConfig[];
openai: ModelConfig[];
}
export type GlobalModelConfig = ModelConfig[];
export const updateModelConfig = async (token: string, config: GlobalModelConfig) => {
let error = null;
@ -246,7 +248,9 @@ export const updateModelConfig = async (token: string, config: GlobalModelConfig
'Content-Type': 'application/json',
Authorization: `Bearer ${token}`
},
body: JSON.stringify(config)
body: JSON.stringify({
models: config
})
})
.then(async (res) => {
if (!res.ok) throw await res.json();
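For comparison, the shape of the global model config before and after this commit, written out as Python literals (values are illustrative):

# Before: grouped by backend, with description/vision_capable flattened into each entry.
old_config = {
    "ollama": [
        {"id": "llama3:latest", "name": "Llama 3", "description": "", "vision_capable": False}
    ],
    "litellm": [],
    "openai": [],
}

# After: one flat list; each entry names its source and nests its params.
new_config = [
    {
        "id": "llama3:latest",
        "source": "ollama",
        "name": "Llama 3",
        "params": {"description": "", "vision_capable": False},
    }
]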

View File

@ -34,7 +34,7 @@ export const getLiteLLMModels = async (token: string = '') => {
name: model.name ?? model.id,
external: true,
source: 'LiteLLM',
custom_info: model.custom_info ?? {}
custom_info: model.custom_info
}))
.sort((a, b) => {
return a.name.localeCompare(b.name);

View File

@ -234,7 +234,7 @@ export const getOpenAIModels = async (token: string = '') => {
id: model.id,
name: model.name ?? model.id,
external: true,
custom_info: model.custom_info ?? {}
custom_info: model.custom_info
}))
.sort((a, b) => {
return a.name.localeCompare(b.name);

View File

@ -1,7 +1,7 @@
<script lang="ts">
import { toast } from 'svelte-sonner';
import { onMount, tick, getContext } from 'svelte';
import { type Model, mobile, modelfiles, settings, showSidebar } from '$lib/stores';
import { type Model, mobile, modelfiles, settings, showSidebar, models } from '$lib/stores';
import { blobToFile, calculateSHA256, findWordIndices } from '$lib/utils';
import {
@ -27,7 +27,8 @@
export let stopResponse: Function;
export let autoScroll = true;
export let selectedModel: Model | undefined;
export let selectedAtModel: Model | undefined;
export let selectedModels: [''];
let chatTextAreaElement: HTMLTextAreaElement;
let filesInputElement;
@ -52,6 +53,8 @@
let speechRecognition;
let visionCapableState = 'all';
$: if (prompt) {
if (chatTextAreaElement) {
chatTextAreaElement.style.height = '';
@ -59,6 +62,20 @@
}
}
$: {
if (selectedAtModel || selectedModels) {
visionCapableState = checkModelsAreVisionCapable();
if (visionCapableState === 'none') {
// Remove all image files
const fileCount = files.length;
files = files.filter((file) => file.type != 'image');
if (files.length < fileCount) {
toast.warning($i18n.t('All selected models do not support image input, removed images'));
}
}
}
}
let mediaRecorder;
let audioChunks = [];
let isRecording = false;
@ -326,6 +343,35 @@
}
};
const checkModelsAreVisionCapable = () => {
let modelsToCheck = [];
if (selectedAtModel !== undefined) {
modelsToCheck = [selectedAtModel.id];
} else {
modelsToCheck = selectedModels;
}
if (modelsToCheck.length == 0 || modelsToCheck[0] == '') {
return 'all';
}
let visionCapableCount = 0;
for (const modelName of modelsToCheck) {
const model = $models.find((m) => m.id === modelName);
if (!model) {
continue;
}
if (model.custom_info?.params.vision_capable ?? true) {
visionCapableCount++;
}
}
if (visionCapableCount == modelsToCheck.length) {
return 'all';
} else if (visionCapableCount == 0) {
return 'none';
} else {
return 'some';
}
};
onMount(() => {
window.setTimeout(() => chatTextAreaElement?.focus(), 0);
@ -358,12 +404,10 @@
inputFiles.forEach((file) => {
console.log(file, file.name.split('.').at(-1));
if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) {
if (selectedModel !== undefined) {
if (!(selectedModel.custom_info?.vision_capable ?? true)) {
toast.error($i18n.t('Selected model does not support image inputs.'));
if (visionCapableState == 'none') {
toast.error($i18n.t('Selected models do not support image inputs'));
return;
}
}
let reader = new FileReader();
reader.onload = (event) => {
files = [
@ -500,12 +544,12 @@
bind:chatInputPlaceholder
{messages}
on:select={(e) => {
selectedModel = e.detail;
selectedAtModel = e.detail;
chatTextAreaElement?.focus();
}}
/>
{#if selectedModel !== undefined}
{#if selectedAtModel !== undefined}
<div
class="px-3 py-2.5 text-left w-full flex justify-between items-center absolute bottom-0 left-0 right-0 bg-gradient-to-t from-50% from-white dark:from-gray-900"
>
@ -514,7 +558,7 @@
crossorigin="anonymous"
alt="model profile"
class="size-5 max-w-[28px] object-cover rounded-full"
src={$modelfiles.find((modelfile) => modelfile.tagName === selectedModel.id)
src={$modelfiles.find((modelfile) => modelfile.tagName === selectedAtModel.id)
?.imageUrl ??
($i18n.language === 'dg-DG'
? `/doge.png`
@ -522,7 +566,7 @@
/>
<div>
Talking to <span class=" font-medium"
>{selectedModel.custom_info?.name ?? selectedModel.name}
>{selectedAtModel.custom_info?.name ?? selectedAtModel.name}
</span>
</div>
</div>
@ -530,7 +574,7 @@
<button
class="flex items-center"
on:click={() => {
selectedModel = undefined;
selectedAtModel = undefined;
}}
>
<XMark />
@ -556,14 +600,12 @@
const _inputFiles = Array.from(inputFiles);
_inputFiles.forEach((file) => {
if (['image/gif', 'image/jpeg', 'image/png'].includes(file['type'])) {
if (selectedModel !== undefined) {
if (!(selectedModel.custom_info?.vision_capable ?? true)) {
toast.error($i18n.t('Selected model does not support image inputs.'));
if (visionCapableState === 'none') {
toast.error($i18n.t('Selected models do not support image inputs'));
inputFiles = null;
filesInputElement.value = '';
return;
}
}
let reader = new FileReader();
reader.onload = (event) => {
files = [
@ -897,7 +939,7 @@
if (e.key === 'Escape') {
console.log('Escape');
selectedModel = undefined;
selectedAtModel = undefined;
}
}}
rows="1"

View File

@ -12,7 +12,12 @@
import { user, MODEL_DOWNLOAD_POOL, models, mobile } from '$lib/stores';
import { toast } from 'svelte-sonner';
import { capitalizeFirstLetter, getModels, splitStream } from '$lib/utils';
import {
capitalizeFirstLetter,
getModels,
sanitizeResponseContent,
splitStream
} from '$lib/utils';
import Tooltip from '$lib/components/common/Tooltip.svelte';
const i18n = getContext('i18n');
@ -23,7 +28,12 @@
export let searchEnabled = true;
export let searchPlaceholder = $i18n.t('Search a model');
export let items = [{ value: 'mango', label: 'Mango' }];
export let items: {
label: string;
value: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
[key: string]: any;
}[] = [];
export let className = ' w-[30rem]';
@ -248,12 +258,8 @@
<!-- {JSON.stringify(item.info)} -->
{#if item.info.external}
<Tooltip
content={`${item.info?.source ?? 'External'}${
item.info.custom_info?.description ? '<br>' : ''
}${item.info.custom_info?.description?.replaceAll('\n', '<br>') ?? ''}`}
>
<div class=" mr-2">
<Tooltip content={`${item.info?.source ?? 'External'}`}>
<div class="">
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
@ -279,11 +285,9 @@
item.info?.details?.quantization_level
? item.info?.details?.quantization_level + ' '
: ''
}${item.info.size ? `(${(item.info.size / 1024 ** 3).toFixed(1)}GB)` : ''}${
item.info.custom_info?.description ? '<br>' : ''
}${item.info.custom_info?.description?.replaceAll('\n', '<br>') ?? ''}`}
}${item.info.size ? `(${(item.info.size / 1024 ** 3).toFixed(1)}GB)` : ''}`}
>
<div class=" mr-2">
<div class="">
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
@ -301,8 +305,31 @@
</div>
</Tooltip>
{/if}
{#if item.info?.custom_info?.params.description}
<Tooltip
content={`${sanitizeResponseContent(
item.info.custom_info?.params.description
).replaceAll('\n', '<br>')}`}
>
<div class="">
<svg
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke-width="1.5"
stroke="currentColor"
class="w-4 h-4"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
d="M9.879 7.519c1.171-1.025 3.071-1.025 4.242 0 1.172 1.025 1.172 2.687 0 3.712-.203.179-.43.326-.67.442-.745.361-1.45.999-1.45 1.827v.75M21 12a9 9 0 1 1-18 0 9 9 0 0 1 18 0Zm-9 5.25h.008v.008H12v-.008Z"
/>
</svg>
</div>
</Tooltip>
{/if}
</div>
{#if value === item.value}
<div class="ml-auto">
<Check />

View File

@ -80,8 +80,8 @@
const model = $models.find((m) => m.id === selectedModelId);
if (model) {
modelName = model.custom_info?.name ?? model.name;
modelDescription = model.custom_info?.description ?? '';
modelIsVisionCapable = model.custom_info?.vision_capable ?? false;
modelDescription = model.custom_info?.params.description ?? '';
modelIsVisionCapable = model.custom_info?.params.vision_capable ?? false;
}
};
@ -521,13 +521,18 @@
const modelSource =
'details' in model ? 'ollama' : model.source === 'LiteLLM' ? 'litellm' : 'openai';
// Remove any existing config
modelConfig[modelSource] = modelConfig[modelSource].filter((m) => m.id !== selectedModelId);
modelConfig = modelConfig.filter(
(m) => !(m.id === selectedModelId && m.source === modelSource)
);
// Add new config
modelConfig[modelSource].push({
modelConfig.push({
id: selectedModelId,
name: modelName,
source: modelSource,
params: {
description: modelDescription,
vision_capable: modelIsVisionCapable
}
});
await updateModelConfig(localStorage.token, modelConfig);
toast.success(
@ -546,7 +551,9 @@
}
const modelSource =
'details' in model ? 'ollama' : model.source === 'LiteLLM' ? 'litellm' : 'openai';
modelConfig[modelSource] = modelConfig[modelSource].filter((m) => m.id !== selectedModelId);
modelConfig = modelConfig.filter(
(m) => !(m.id === selectedModelId && m.source === modelSource)
);
await updateModelConfig(localStorage.token, modelConfig);
toast.success(
$i18n.t('Model info for {{modelName}} deleted successfully', { modelName: selectedModelId })
@ -559,6 +566,9 @@
};
onMount(async () => {
console.log('mounting');
await Promise.all([
(async () => {
OLLAMA_URLS = await getOllamaUrls(localStorage.token).catch((error) => {
toast.error(error);
return [];
@ -567,10 +577,17 @@
if (OLLAMA_URLS.length > 0) {
selectedOllamaUrlIdx = 0;
}
})(),
(async () => {
liteLLMModelInfo = await getLiteLLMModelInfo(localStorage.token);
})(),
(async () => {
modelConfig = await getModelConfig(localStorage.token);
})(),
(async () => {
ollamaVersion = await getOllamaVersion(localStorage.token).catch((error) => false);
})()
]);
});
</script>

View File

@ -325,7 +325,7 @@
.filter((model) => model.name !== 'hr')
.map((model) => ({
value: model.id,
label: model.name,
label: model.custom_info?.name ?? model.name,
info: model
}))}
bind:value={selectedModelId}

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "التعليمات المتقدمة",
"all": "الكل",
"All Documents": "جميع الملفات",
"All selected models do not support image input, removed images": "",
"All Users": "جميع المستخدمين",
"Allow": "يسمح",
"Allow Chat Deletion": "يستطيع حذف المحادثات",
@ -392,7 +393,7 @@
"Select a model": "أختار الموديل",
"Select an Ollama instance": "أختار سيرفر ",
"Select model": " أختار موديل",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "يُرجى إدخال طلبك هنا",
"Send message": "يُرجى إدخال طلبك هنا.",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Разширени Параметри",
"all": "всички",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Всички Потребители",
"Allow": "Позволи",
"Allow Chat Deletion": "Позволи Изтриване на Чат",
@ -392,7 +393,7 @@
"Select a model": "Изберете модел",
"Select an Ollama instance": "Изберете Ollama инстанция",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Изпращане на Съобщение",
"Send message": "Изпращане на съобщение",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "এডভান্সড প্যারামিটার্স",
"all": "সব",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "সব ইউজার",
"Allow": "অনুমোদন",
"Allow Chat Deletion": "চ্যাট ডিলিট করতে দিন",
@ -392,7 +393,7 @@
"Select a model": "একটি মডেল নির্বাচন করুন",
"Select an Ollama instance": "একটি Ollama ইন্সট্যান্স নির্বাচন করুন",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "একটি মেসেজ পাঠান",
"Send message": "মেসেজ পাঠান",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Paràmetres Avançats",
"all": "tots",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Tots els Usuaris",
"Allow": "Permet",
"Allow Chat Deletion": "Permet la Supressió del Xat",
@ -392,7 +393,7 @@
"Select a model": "Selecciona un model",
"Select an Ollama instance": "Selecciona una instància d'Ollama",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Envia un Missatge",
"Send message": "Envia missatge",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Erweiterte Parameter",
"all": "Alle",
"All Documents": "Alle Dokumente",
"All selected models do not support image input, removed images": "",
"All Users": "Alle Benutzer",
"Allow": "Erlauben",
"Allow Chat Deletion": "Chat Löschung erlauben",
@ -392,7 +393,7 @@
"Select a model": "Ein Modell auswählen",
"Select an Ollama instance": "Eine Ollama Instanz auswählen",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Eine Nachricht senden",
"Send message": "Nachricht senden",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Advanced Parameters",
"all": "all",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "All Users",
"Allow": "Allow",
"Allow Chat Deletion": "Allow Delete Chats",
@ -392,7 +393,7 @@
"Select a model": "Select a model much choice",
"Select an Ollama instance": "Select an Ollama instance very choose",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Send a Message much message",
"Send message": "Send message very send",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "",
"all": "",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "",
"Allow": "",
"Allow Chat Deletion": "",
@ -392,7 +393,7 @@
"Select a model": "",
"Select an Ollama instance": "",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "",
"Send message": "",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "",
"all": "",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "",
"Allow": "",
"Allow Chat Deletion": "",
@ -392,7 +393,7 @@
"Select a model": "",
"Select an Ollama instance": "",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "",
"Send message": "",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Parámetros Avanzados",
"all": "todo",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Todos los Usuarios",
"Allow": "Permitir",
"Allow Chat Deletion": "Permitir Borrar Chats",
@ -392,7 +393,7 @@
"Select a model": "Selecciona un modelo",
"Select an Ollama instance": "Seleccione una instancia de Ollama",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Enviar un Mensaje",
"Send message": "Enviar Mensaje",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "پارامترهای پیشرفته",
"all": "همه",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "همه کاربران",
"Allow": "اجازه دادن",
"Allow Chat Deletion": "اجازه حذف گپ",
@ -392,7 +393,7 @@
"Select a model": "انتخاب یک مدل",
"Select an Ollama instance": "انتخاب یک نمونه از اولاما",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "ارسال یک پیام",
"Send message": "ارسال پیام",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Edistyneet parametrit",
"all": "kaikki",
"All Documents": "Kaikki asiakirjat",
"All selected models do not support image input, removed images": "",
"All Users": "Kaikki käyttäjät",
"Allow": "Salli",
"Allow Chat Deletion": "Salli keskustelujen poisto",
@ -392,7 +393,7 @@
"Select a model": "Valitse malli",
"Select an Ollama instance": "Valitse Ollama-instanssi",
"Select model": "Valitse malli",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Lähetä viesti",
"Send message": "Lähetä viesti",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Paramètres avancés",
"all": "tous",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Tous les utilisateurs",
"Allow": "Autoriser",
"Allow Chat Deletion": "Autoriser la suppression des discussions",
@ -392,7 +393,7 @@
"Select a model": "Sélectionnez un modèle",
"Select an Ollama instance": "Sélectionner une instance Ollama",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Envoyer un message",
"Send message": "Envoyer un message",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Paramètres avancés",
"all": "tous",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Tous les utilisateurs",
"Allow": "Autoriser",
"Allow Chat Deletion": "Autoriser la suppression du chat",
@ -392,7 +393,7 @@
"Select a model": "Sélectionner un modèle",
"Select an Ollama instance": "Sélectionner une instance Ollama",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Envoyer un message",
"Send message": "Envoyer un message",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "פרמטרים מתקדמים",
"all": "הכל",
"All Documents": "כל המסמכים",
"All selected models do not support image input, removed images": "",
"All Users": "כל המשתמשים",
"Allow": "אפשר",
"Allow Chat Deletion": "אפשר מחיקת צ'אט",
@ -392,7 +393,7 @@
"Select a model": "בחר מודל",
"Select an Ollama instance": "בחר מופע של Ollama",
"Select model": "בחר מודל",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "שלח הודעה",
"Send message": "שלח הודעה",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "उन्नत पैरामीटर",
"all": "सभी",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "सभी उपयोगकर्ता",
"Allow": "अनुमति दें",
"Allow Chat Deletion": "चैट हटाने की अनुमति दें",
@ -392,7 +393,7 @@
"Select a model": "एक मॉडल चुनें",
"Select an Ollama instance": "एक Ollama Instance चुनें",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "एक संदेश भेजो",
"Send message": "मेसेज भेजें",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Napredni parametri",
"all": "sve",
"All Documents": "Svi dokumenti",
"All selected models do not support image input, removed images": "",
"All Users": "Svi korisnici",
"Allow": "Dopusti",
"Allow Chat Deletion": "Dopusti brisanje razgovora",
@ -392,7 +393,7 @@
"Select a model": "Odaberite model",
"Select an Ollama instance": "Odaberite Ollama instancu",
"Select model": "Odaberite model",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Pošaljite poruku",
"Send message": "Pošalji poruku",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Parametri avanzati",
"all": "tutti",
"All Documents": "Tutti i documenti",
"All selected models do not support image input, removed images": "",
"All Users": "Tutti gli utenti",
"Allow": "Consenti",
"Allow Chat Deletion": "Consenti l'eliminazione della chat",
@ -392,7 +393,7 @@
"Select a model": "Seleziona un modello",
"Select an Ollama instance": "Seleziona un'istanza Ollama",
"Select model": "Seleziona modello",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Invia un messaggio",
"Send message": "Invia messaggio",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "詳細パラメーター",
"all": "すべて",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "すべてのユーザー",
"Allow": "許可",
"Allow Chat Deletion": "チャットの削除を許可",
@ -392,7 +393,7 @@
"Select a model": "モデルを選択",
"Select an Ollama instance": "Ollama インスタンスを選択",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "メッセージを送信",
"Send message": "メッセージを送信",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "დამატებითი პარამეტრები",
"all": "ყველა",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "ყველა მომხმარებელი",
"Allow": "ნების დართვა",
"Allow Chat Deletion": "მიმოწერის წაშლის დაშვება",
@ -392,7 +393,7 @@
"Select a model": "მოდელის არჩევა",
"Select an Ollama instance": "",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "შეტყობინების გაგზავნა",
"Send message": "შეტყობინების გაგზავნა",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "고급 매개변수",
"all": "모두",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "모든 사용자",
"Allow": "허용",
"Allow Chat Deletion": "채팅 삭제 허용",
@ -392,7 +393,7 @@
"Select a model": "모델 선택",
"Select an Ollama instance": "Ollama 인스턴스 선택",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "메시지 보내기",
"Send message": "메시지 보내기",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Geavanceerde Parameters",
"all": "alle",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Alle Gebruikers",
"Allow": "Toestaan",
"Allow Chat Deletion": "Sta Chat Verwijdering toe",
@ -392,7 +393,7 @@
"Select a model": "Selecteer een model",
"Select an Ollama instance": "Selecteer een Ollama instantie",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Stuur een Bericht",
"Send message": "Stuur bericht",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "ਉੱਚ ਸਤਰ ਦੇ ਪੈਰਾਮੀਟਰ",
"all": "ਸਾਰੇ",
"All Documents": "ਸਾਰੇ ਡਾਕੂਮੈਂਟ",
"All selected models do not support image input, removed images": "",
"All Users": "ਸਾਰੇ ਉਪਭੋਗਤਾ",
"Allow": "ਅਨੁਮਤੀ",
"Allow Chat Deletion": "ਗੱਲਬਾਤ ਮਿਟਾਉਣ ਦੀ ਆਗਿਆ ਦਿਓ",
@ -392,7 +393,7 @@
"Select a model": "ਇੱਕ ਮਾਡਲ ਚੁਣੋ",
"Select an Ollama instance": "ਇੱਕ ਓਲਾਮਾ ਇੰਸਟੈਂਸ ਚੁਣੋ",
"Select model": "ਮਾਡਲ ਚੁਣੋ",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "ਭੇਜੋ",
"Send a Message": "ਇੱਕ ਸੁਨੇਹਾ ਭੇਜੋ",
"Send message": "ਸੁਨੇਹਾ ਭੇਜੋ",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Zaawansowane parametry",
"all": "wszyscy",
"All Documents": "Wszystkie dokumenty",
"All selected models do not support image input, removed images": "",
"All Users": "Wszyscy użytkownicy",
"Allow": "Pozwól",
"Allow Chat Deletion": "Pozwól na usuwanie czatu",
@ -392,7 +393,7 @@
"Select a model": "Wybierz model",
"Select an Ollama instance": "Wybierz instancję Ollama",
"Select model": "Wybierz model",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Wyślij Wiadomość",
"Send message": "Wyślij wiadomość",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Parâmetros Avançados",
"all": "todos",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Todos os Usuários",
"Allow": "Permitir",
"Allow Chat Deletion": "Permitir Exclusão de Bate-papo",
@ -392,7 +393,7 @@
"Select a model": "Selecione um modelo",
"Select an Ollama instance": "Selecione uma instância Ollama",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Enviar uma Mensagem",
"Send message": "Enviar mensagem",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Parâmetros Avançados",
"all": "todos",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Todos os Usuários",
"Allow": "Permitir",
"Allow Chat Deletion": "Permitir Exclusão de Bate-papo",
@ -392,7 +393,7 @@
"Select a model": "Selecione um modelo",
"Select an Ollama instance": "Selecione uma instância Ollama",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Enviar uma Mensagem",
"Send message": "Enviar mensagem",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Расширенные Параметры",
"all": "всё",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Все пользователи",
"Allow": "Разрешить",
"Allow Chat Deletion": "Дозволять удаление чат",
@ -392,7 +393,7 @@
"Select a model": "Выберите модель",
"Select an Ollama instance": "Выберите экземпляр Ollama",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Отправить сообщение",
"Send message": "Отправить сообщение",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Напредни параметри",
"all": "сви",
"All Documents": "Сви документи",
"All selected models do not support image input, removed images": "",
"All Users": "Сви корисници",
"Allow": "Дозволи",
"Allow Chat Deletion": "Дозволи брисање ћаскања",
@ -392,7 +393,7 @@
"Select a model": "Изабери модел",
"Select an Ollama instance": "Изабери Ollama инстанцу",
"Select model": "Изабери модел",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Пошаљи поруку",
"Send message": "Пошаљи поруку",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Avancerade parametrar",
"all": "alla",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Alla användare",
"Allow": "Tillåt",
"Allow Chat Deletion": "Tillåt chattborttagning",
@ -392,7 +393,7 @@
"Select a model": "Välj en modell",
"Select an Ollama instance": "Välj en Ollama-instans",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Skicka ett meddelande",
"Send message": "Skicka meddelande",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Gelişmiş Parametreler",
"all": "tümü",
"All Documents": "Tüm Belgeler",
"All selected models do not support image input, removed images": "",
"All Users": "Tüm Kullanıcılar",
"Allow": "İzin ver",
"Allow Chat Deletion": "Sohbet Silmeye İzin Ver",
@ -392,7 +393,7 @@
"Select a model": "Bir model seç",
"Select an Ollama instance": "Bir Ollama örneği seçin",
"Select model": "Model seç",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Bir Mesaj Gönder",
"Send message": "Mesaj gönder",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Розширені параметри",
"all": "всі",
"All Documents": "Усі документи",
"All selected models do not support image input, removed images": "",
"All Users": "Всі користувачі",
"Allow": "Дозволити",
"Allow Chat Deletion": "Дозволити видалення чату",
@ -392,7 +393,7 @@
"Select a model": "Виберіть модель",
"Select an Ollama instance": "Виберіть екземпляр Ollama",
"Select model": "Вибрати модель",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Надіслати повідомлення",
"Send message": "Надіслати повідомлення",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "Các tham số Nâng cao",
"all": "tất cả",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "Danh sách người sử dụng",
"Allow": "Cho phép",
"Allow Chat Deletion": "Cho phép Xóa nội dung chat",
@ -392,7 +393,7 @@
"Select a model": "Chọn mô hình",
"Select an Ollama instance": "Chọn một thực thể Ollama",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "Gửi yêu cầu",
"Send message": "Gửi yêu cầu",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "高级参数",
"all": "所有",
"All Documents": "所有文档",
"All selected models do not support image input, removed images": "",
"All Users": "所有用户",
"Allow": "允许",
"Allow Chat Deletion": "允许删除聊天记录",
@ -392,7 +393,7 @@
"Select a model": "选择一个模型",
"Select an Ollama instance": "选择一个 Ollama 实例",
"Select model": "选择模型",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "发送",
"Send a Message": "发送消息",
"Send message": "发送消息",

View File

@ -29,6 +29,7 @@
"Advanced Parameters": "進階參數",
"all": "所有",
"All Documents": "",
"All selected models do not support image input, removed images": "",
"All Users": "所有使用者",
"Allow": "允許",
"Allow Chat Deletion": "允許刪除聊天紀錄",
@ -392,7 +393,7 @@
"Select a model": "選擇一個模型",
"Select an Ollama instance": "選擇 Ollama 實例",
"Select model": "",
"Selected model does not support image inputs.": "",
"Selected models do not support image inputs": "",
"Send": "",
"Send a Message": "傳送訊息",
"Send message": "傳送訊息",

View File

@ -1,5 +1,6 @@
import { APP_NAME } from '$lib/constants';
import { type Writable, writable } from 'svelte/store';
import type { GlobalModelConfig, ModelConfig } from '$lib/apis';
// Backend
export const WEBUI_NAME = writable(APP_NAME);
@ -44,17 +45,10 @@ export const showChangelog = writable(false);
export type Model = OpenAIModel | OllamaModel;
type ModelCustomInfo = {
id?: string;
name?: string;
description?: string;
vision_capable?: boolean;
};
type BaseModel = {
id: string;
name: string;
custom_info?: ModelCustomInfo;
custom_info?: ModelConfig;
};
export interface OpenAIModel extends BaseModel {
@ -143,19 +137,6 @@ type Config = {
model_config?: GlobalModelConfig;
};
type GlobalModelConfig = {
ollama?: ModelConfig[];
litellm?: ModelConfig[];
openai?: ModelConfig[];
};
type ModelConfig = {
id?: string;
name?: string;
description?: string;
vision_capable?: boolean;
};
type PromptSuggestion = {
content: string;
title: [string, string];

View File

@ -265,7 +265,7 @@
const hasImages = messages.some((message) =>
message.files?.some((file) => file.type === 'image')
);
if (hasImages && !(model.custom_info?.vision_capable ?? true)) {
if (hasImages && !(model.custom_info?.params.vision_capable ?? true)) {
toast.error(
$i18n.t('Model {{modelName}} is not vision capable', {
modelName: model.custom_info?.name ?? model.name ?? model.id
@ -949,6 +949,7 @@
bind:prompt
bind:autoScroll
bind:selectedModel={atSelectedModel}
{selectedModels}
{messages}
{submitPrompt}
{stopResponse}

View File

@ -269,7 +269,7 @@
const hasImages = messages.some((message) =>
message.files?.some((file) => file.type === 'image')
);
if (hasImages && !(model.custom_info?.vision_capable ?? true)) {
if (hasImages && !(model.custom_info?.params.vision_capable ?? true)) {
toast.error(
$i18n.t('Model {{modelName}} is not vision capable', {
modelName: model.custom_info?.name ?? model.name ?? model.id
@ -963,6 +963,7 @@
bind:prompt
bind:autoScroll
bind:selectedModel={atSelectedModel}
{selectedModels}
{messages}
{submitPrompt}
{stopResponse}