Mirror of https://github.com/open-webui/open-webui (synced 2025-03-22 22:07:15 +00:00)

Merge remote-tracking branch 'origin/dev' into feat/model-config

This commit is contained in commit 1a16f8fb1c.

CHANGELOG.md (19 changed lines)
@@ -5,6 +5,25 @@ All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [0.1.125] - 2024-05-19

### Added

- **🔄 Updated UI**: Chat interface revamped with chat bubbles. Easily switch back to the old style via settings > interface > chat bubble UI.
- **📂 Enhanced Sidebar UI**: Model files, documents, prompts, and playground merged into Workspace for streamlined access.
- **🚀 Improved Many Model Interaction**: All responses now displayed simultaneously for a smoother experience.
- **🐍 Python Code Execution**: Execute Python code locally in the browser with libraries like 'requests', 'beautifulsoup4', 'numpy', 'pandas', 'seaborn', 'matplotlib', 'scikit-learn', 'scipy', 'regex'.
- **🧠 Experimental Memory Feature**: Manually input personal information you want LLMs to remember via settings > personalization > memory.
- **💾 Persistent Settings**: Settings now saved as config.json for convenience.
- **🩺 Health Check Endpoint**: Added for Docker deployment.
- **↕️ RTL Support**: Toggle chat direction via settings > interface > chat direction.
- **🖥️ PowerPoint Support**: RAG pipeline now supports PowerPoint documents.
- **🌐 Language Updates**: Ukrainian, Turkish, Arabic, Chinese, Serbian, Vietnamese updated; Punjabi added.

### Changed

- **👤 Shared Chat Update**: Shared chat now includes creator user information.

## [0.1.124] - 2024-05-08

### Added
@@ -69,6 +69,7 @@ from utils.misc import (
from utils.utils import get_current_user, get_admin_user

from config import (
+    ENV,
    SRC_LOG_LEVELS,
    UPLOAD_DIR,
    DOCS_DIR,

@@ -260,7 +261,7 @@ async def update_embedding_config(
        app.state.config.OPENAI_API_BASE_URL = form_data.openai_config.url
        app.state.config.OPENAI_API_KEY = form_data.openai_config.key

-       update_embedding_model(app.state.config.RAG_EMBEDDING_MODEL), True
+       update_embedding_model(app.state.config.RAG_EMBEDDING_MODEL)

        app.state.EMBEDDING_FUNCTION = get_embedding_function(
            app.state.config.RAG_EMBEDDING_ENGINE,

@@ -951,3 +952,14 @@ def reset(user=Depends(get_admin_user)) -> bool:
        log.exception(e)

    return True


+if ENV == "dev":
+
+    @app.get("/ef")
+    async def get_embeddings():
+        return {"result": app.state.EMBEDDING_FUNCTION("hello world")}
+
+    @app.get("/ef/{text}")
+    async def get_embeddings_text(text: str):
+        return {"result": app.state.EMBEDDING_FUNCTION(text)}
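The two `/ef` routes above are only registered when `ENV == "dev"`; they simply run the currently configured embedding function server-side. A minimal sketch of exercising them with `requests`, assuming a local instance on port 8080 and the `/rag/api/v1` mount shown later in this commit (the base URL and the absence of extra auth are assumptions, not part of the change):

```python
import requests

BASE_URL = "http://localhost:8080/rag/api/v1"  # assumed local dev deployment

# Embed the default "hello world" probe string.
vec = requests.get(f"{BASE_URL}/ef").json()["result"]
print(len(vec))  # dimensionality of the embedding vector

# Embed an arbitrary string via the path parameter.
resp = requests.get(f"{BASE_URL}/ef/open-webui-memories")
print(resp.json()["result"][:5])
```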
backend/apps/web/internal/migrations/008_add_memory.py (new file, 53 lines)

@@ -0,0 +1,53 @@
"""Peewee migrations -- 008_add_memory.py.

Some examples (model - class or model name)::

    > Model = migrator.orm['table_name']            # Return model in current state by name
    > Model = migrator.ModelClass                   # Return model in current state by name

    > migrator.sql(sql)                             # Run custom SQL
    > migrator.run(func, *args, **kwargs)           # Run python function with the given args
    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
    > migrator.remove_model(model, cascade=True)    # Remove a model
    > migrator.add_fields(model, **fields)          # Add fields to a model
    > migrator.change_fields(model, **fields)       # Change fields
    > migrator.remove_fields(model, *field_names, cascade=True)
    > migrator.rename_field(model, old_field_name, new_field_name)
    > migrator.rename_table(model, new_table_name)
    > migrator.add_index(model, *col_names, unique=False)
    > migrator.add_not_null(model, *field_names)
    > migrator.add_default(model, field_name, default)
    > migrator.add_constraint(model, name, sql)
    > migrator.drop_index(model, *col_names)
    > migrator.drop_not_null(model, *field_names)
    > migrator.drop_constraints(model, *constraints)

"""

from contextlib import suppress

import peewee as pw
from peewee_migrate import Migrator


with suppress(ImportError):
    import playhouse.postgres_ext as pw_pext


def migrate(migrator: Migrator, database: pw.Database, *, fake=False):
    @migrator.create_model
    class Memory(pw.Model):
        id = pw.CharField(max_length=255, unique=True)
        user_id = pw.CharField(max_length=255)
        content = pw.TextField(null=False)
        updated_at = pw.BigIntegerField(null=False)
        created_at = pw.BigIntegerField(null=False)

        class Meta:
            table_name = "memory"


def rollback(migrator: Migrator, database: pw.Database, *, fake=False):
    """Write your rollback migrations here."""

    migrator.remove_model("memory")
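The migration above creates the new `memory` table. A minimal sketch of applying it by hand with peewee_migrate's Router; Open WebUI wires this into its own db module, so the database file name and directory below are assumptions for illustration only:

```python
import peewee as pw
from peewee_migrate import Router

db = pw.SqliteDatabase("webui.db")  # assumed database file
router = Router(db, migrate_dir="backend/apps/web/internal/migrations")

router.run()  # applies 008_add_memory (and any earlier pending migrations)
# Rolling back would invoke rollback() above and drop the "memory" table.
```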
@@ -9,6 +9,7 @@ from apps.web.routers import (
    modelfiles,
    prompts,
    configs,
+    memories,
    utils,
)
from config import (

@@ -41,6 +42,7 @@ app.state.config.USER_PERMISSIONS = USER_PERMISSIONS
app.state.config.WEBHOOK_URL = WEBHOOK_URL
app.state.AUTH_TRUSTED_EMAIL_HEADER = WEBUI_AUTH_TRUSTED_EMAIL_HEADER


app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,

@@ -52,9 +54,12 @@ app.add_middleware(
app.include_router(auths.router, prefix="/auths", tags=["auths"])
app.include_router(users.router, prefix="/users", tags=["users"])
app.include_router(chats.router, prefix="/chats", tags=["chats"])

app.include_router(documents.router, prefix="/documents", tags=["documents"])
app.include_router(modelfiles.router, prefix="/modelfiles", tags=["modelfiles"])
app.include_router(prompts.router, prefix="/prompts", tags=["prompts"])
+app.include_router(memories.router, prefix="/memories", tags=["memories"])


app.include_router(configs.router, prefix="/configs", tags=["configs"])
app.include_router(utils.router, prefix="/utils", tags=["utils"])
backend/apps/web/models/memories.py (new file, 118 lines)

@@ -0,0 +1,118 @@
from pydantic import BaseModel
from peewee import *
from playhouse.shortcuts import model_to_dict
from typing import List, Union, Optional

from apps.web.internal.db import DB
from apps.web.models.chats import Chats

import time
import uuid

####################
# Memory DB Schema
####################


class Memory(Model):
    id = CharField(unique=True)
    user_id = CharField()
    content = TextField()
    updated_at = BigIntegerField()
    created_at = BigIntegerField()

    class Meta:
        database = DB


class MemoryModel(BaseModel):
    id: str
    user_id: str
    content: str
    updated_at: int  # timestamp in epoch
    created_at: int  # timestamp in epoch


####################
# Forms
####################


class MemoriesTable:
    def __init__(self, db):
        self.db = db
        self.db.create_tables([Memory])

    def insert_new_memory(
        self,
        user_id: str,
        content: str,
    ) -> Optional[MemoryModel]:
        id = str(uuid.uuid4())

        memory = MemoryModel(
            **{
                "id": id,
                "user_id": user_id,
                "content": content,
                "created_at": int(time.time()),
                "updated_at": int(time.time()),
            }
        )
        result = Memory.create(**memory.model_dump())
        if result:
            return memory
        else:
            return None

    def get_memories(self) -> List[MemoryModel]:
        try:
            memories = Memory.select()
            return [MemoryModel(**model_to_dict(memory)) for memory in memories]
        except:
            return None

    def get_memories_by_user_id(self, user_id: str) -> List[MemoryModel]:
        try:
            memories = Memory.select().where(Memory.user_id == user_id)
            return [MemoryModel(**model_to_dict(memory)) for memory in memories]
        except:
            return None

    def get_memory_by_id(self, id) -> Optional[MemoryModel]:
        try:
            memory = Memory.get(Memory.id == id)
            return MemoryModel(**model_to_dict(memory))
        except:
            return None

    def delete_memory_by_id(self, id: str) -> bool:
        try:
            query = Memory.delete().where(Memory.id == id)
            query.execute()  # Remove the rows, return number of rows removed.

            return True

        except:
            return False

    def delete_memories_by_user_id(self, user_id: str) -> bool:
        try:
            query = Memory.delete().where(Memory.user_id == user_id)
            query.execute()

            return True
        except:
            return False

    def delete_memory_by_id_and_user_id(self, id: str, user_id: str) -> bool:
        try:
            query = Memory.delete().where(Memory.id == id, Memory.user_id == user_id)
            query.execute()

            return True
        except:
            return False


Memories = MemoriesTable(DB)
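The `Memories` singleton constructed at the bottom of this file is what the new router consumes. A minimal sketch of driving it directly from a Python shell; the user id is a placeholder, since in the app it comes from the authenticated user:

```python
from apps.web.models.memories import Memories

user_id = "example-user-id"  # placeholder, not a real account id

# Create, list, and delete a memory through the table helpers defined above.
memory = Memories.insert_new_memory(user_id, "Prefers metric units.")
print(memory.id, memory.created_at)

for m in Memories.get_memories_by_user_id(user_id):
    print(m.content)

Memories.delete_memory_by_id_and_user_id(memory.id, user_id)
```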
backend/apps/web/routers/memories.py (new file, 145 lines)

@@ -0,0 +1,145 @@
from fastapi import Response, Request
from fastapi import Depends, FastAPI, HTTPException, status
from datetime import datetime, timedelta
from typing import List, Union, Optional

from fastapi import APIRouter
from pydantic import BaseModel
import logging

from apps.web.models.memories import Memories, MemoryModel

from utils.utils import get_verified_user
from constants import ERROR_MESSAGES

from config import SRC_LOG_LEVELS, CHROMA_CLIENT

log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])

router = APIRouter()


@router.get("/ef")
async def get_embeddings(request: Request):
    return {"result": request.app.state.EMBEDDING_FUNCTION("hello world")}


############################
# GetMemories
############################


@router.get("/", response_model=List[MemoryModel])
async def get_memories(user=Depends(get_verified_user)):
    return Memories.get_memories_by_user_id(user.id)


############################
# AddMemory
############################


class AddMemoryForm(BaseModel):
    content: str


@router.post("/add", response_model=Optional[MemoryModel])
async def add_memory(
    request: Request, form_data: AddMemoryForm, user=Depends(get_verified_user)
):
    memory = Memories.insert_new_memory(user.id, form_data.content)
    memory_embedding = request.app.state.EMBEDDING_FUNCTION(memory.content)

    collection = CHROMA_CLIENT.get_or_create_collection(name=f"user-memory-{user.id}")
    collection.upsert(
        documents=[memory.content],
        ids=[memory.id],
        embeddings=[memory_embedding],
        metadatas=[{"created_at": memory.created_at}],
    )

    return memory


############################
# QueryMemory
############################


class QueryMemoryForm(BaseModel):
    content: str


@router.post("/query")
async def query_memory(
    request: Request, form_data: QueryMemoryForm, user=Depends(get_verified_user)
):
    query_embedding = request.app.state.EMBEDDING_FUNCTION(form_data.content)
    collection = CHROMA_CLIENT.get_or_create_collection(name=f"user-memory-{user.id}")

    results = collection.query(
        query_embeddings=[query_embedding],
        n_results=1,  # how many results to return
    )

    return results


############################
# ResetMemoryFromVectorDB
############################
@router.get("/reset", response_model=bool)
async def reset_memory_from_vector_db(
    request: Request, user=Depends(get_verified_user)
):
    CHROMA_CLIENT.delete_collection(f"user-memory-{user.id}")
    collection = CHROMA_CLIENT.get_or_create_collection(name=f"user-memory-{user.id}")

    memories = Memories.get_memories_by_user_id(user.id)
    for memory in memories:
        memory_embedding = request.app.state.EMBEDDING_FUNCTION(memory.content)
        collection.upsert(
            documents=[memory.content],
            ids=[memory.id],
            embeddings=[memory_embedding],
        )
    return True


############################
# DeleteMemoriesByUserId
############################


@router.delete("/user", response_model=bool)
async def delete_memory_by_user_id(user=Depends(get_verified_user)):
    result = Memories.delete_memories_by_user_id(user.id)

    if result:
        try:
            CHROMA_CLIENT.delete_collection(f"user-memory-{user.id}")
        except Exception as e:
            log.error(e)
        return True

    return False


############################
# DeleteMemoryById
############################


@router.delete("/{memory_id}", response_model=bool)
async def delete_memory_by_id(memory_id: str, user=Depends(get_verified_user)):
    result = Memories.delete_memory_by_id_and_user_id(memory_id, user.id)

    if result:
        collection = CHROMA_CLIENT.get_or_create_collection(
            name=f"user-memory-{user.id}"
        )
        collection.delete(ids=[memory_id])
        return True

    return False
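With this router included under `/memories` and the web app mounted at `/api/v1` (both shown elsewhere in this commit), the new endpoints can be exercised over HTTP. A minimal sketch with `requests`; the base URL and TOKEN are placeholders, not values from this commit:

```python
import requests

BASE_URL = "http://localhost:8080/api/v1/memories"  # assumed local deployment
TOKEN = "<your JWT or API token>"                    # placeholder
headers = {"Authorization": f"Bearer {TOKEN}"}

# Add a memory (embedded and upserted into the per-user Chroma collection).
memory = requests.post(
    f"{BASE_URL}/add", json={"content": "I prefer concise answers."}, headers=headers
).json()

# Semantic lookup against the per-user collection (server returns n_results=1).
hits = requests.post(
    f"{BASE_URL}/query", json={"content": "How should answers be phrased?"}, headers=headers
).json()

# List all memories, then delete the one we just created.
all_memories = requests.get(f"{BASE_URL}/", headers=headers).json()
requests.delete(f"{BASE_URL}/{memory['id']}", headers=headers)
```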
@@ -240,9 +240,15 @@ async def check_url(request: Request, call_next):
    return response


-app.mount("/api/v1", webui_app)
-app.mount("/litellm/api", litellm_app)
+@app.middleware("http")
+async def update_embedding_function(request: Request, call_next):
+    response = await call_next(request)
+    if "/embedding/update" in request.url.path:
+        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
+    return response
+
+
+app.mount("/litellm/api", litellm_app)
app.mount("/ollama", ollama_app)
app.mount("/openai/api", openai_app)

@@ -250,6 +256,10 @@ app.mount("/images/api/v1", images_app)
app.mount("/audio/api/v1", audio_app)
app.mount("/rag/api/v1", rag_app)

+app.mount("/api/v1", webui_app)
+
+webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION


@app.get("/api/config")
async def get_app_config():
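The memories router calls `request.app.state.EMBEDDING_FUNCTION` on the web sub-app, so it must keep tracking the RAG sub-app's embedding function after the embedding config changes; that is what the `update_embedding_function` middleware above does. A self-contained sketch of the same state-sharing pattern (not the project's actual wiring; the routes and stand-in embedders are illustrative only):

```python
from fastapi import FastAPI, Request

app, rag_app, webui_app = FastAPI(), FastAPI(), FastAPI()
rag_app.state.EMBEDDING_FUNCTION = lambda text: [0.0, 1.0]  # stand-in embedder


@rag_app.post("/embedding/update")
async def update_embedding():
    # Pretend a new model was configured: swap the callable on the RAG app.
    rag_app.state.EMBEDDING_FUNCTION = lambda text: [1.0, 0.0]
    return {"status": True}


@app.middleware("http")
async def update_embedding_function(request: Request, call_next):
    response = await call_next(request)
    if "/embedding/update" in request.url.path:
        # Re-point the web sub-app at whatever the RAG sub-app now holds.
        webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
    return response


app.mount("/rag/api/v1", rag_app)
app.mount("/api/v1", webui_app)
webui_app.state.EMBEDDING_FUNCTION = rag_app.state.EMBEDDING_FUNCTION
```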
package-lock.json (generated, 4 changed lines)
@@ -1,12 +1,12 @@
{
	"name": "open-webui",
-	"version": "0.1.124",
+	"version": "0.1.125",
	"lockfileVersion": 3,
	"requires": true,
	"packages": {
		"": {
			"name": "open-webui",
-			"version": "0.1.124",
+			"version": "0.1.125",
			"dependencies": {
				"@pyscript/core": "^0.4.32",
				"@sveltejs/adapter-node": "^1.3.1",
@@ -1,6 +1,6 @@
{
	"name": "open-webui",
-	"version": "0.1.124",
+	"version": "0.1.125",
	"private": true,
	"scripts": {
		"dev": "npm run pyodide:fetch && vite dev --host",
@@ -6,7 +6,8 @@ const packages = [
	'matplotlib',
	'scikit-learn',
	'scipy',
-	'regex'
+	'regex',
+	'seaborn'
];

import { loadPyodide } from 'pyodide';
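With 'seaborn' added to the prefetched Pyodide package list, code run in the in-browser Python runtime can import it directly. A sketch of the kind of snippet the chat code-runner could now execute; how plot output is surfaced is up to the UI, this only exercises the libraries:

```python
import numpy as np
import pandas as pd
import seaborn as sns

# Build a tiny DataFrame and draw a seaborn line plot from it.
df = pd.DataFrame({"x": np.arange(10), "y": np.arange(10) ** 2})
ax = sns.lineplot(data=df, x="x", y="y")
print(df.describe())
```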
src/lib/apis/memories/index.ts (new file, 155 lines)

@@ -0,0 +1,155 @@
import { WEBUI_API_BASE_URL } from '$lib/constants';

export const getMemories = async (token: string) => {
	let error = null;

	const res = await fetch(`${WEBUI_API_BASE_URL}/memories`, {
		method: 'GET',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			authorization: `Bearer ${token}`
		}
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.catch((err) => {
			error = err.detail;
			console.log(err);
			return null;
		});

	if (error) {
		throw error;
	}

	return res;
};

export const addNewMemory = async (token: string, content: string) => {
	let error = null;

	const res = await fetch(`${WEBUI_API_BASE_URL}/memories/add`, {
		method: 'POST',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			authorization: `Bearer ${token}`
		},
		body: JSON.stringify({
			content: content
		})
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.catch((err) => {
			error = err.detail;
			console.log(err);
			return null;
		});

	if (error) {
		throw error;
	}

	return res;
};

export const queryMemory = async (token: string, content: string) => {
	let error = null;

	const res = await fetch(`${WEBUI_API_BASE_URL}/memories/query`, {
		method: 'POST',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			authorization: `Bearer ${token}`
		},
		body: JSON.stringify({
			content: content
		})
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.catch((err) => {
			error = err.detail;
			console.log(err);
			return null;
		});

	if (error) {
		throw error;
	}

	return res;
};

export const deleteMemoryById = async (token: string, id: string) => {
	let error = null;

	const res = await fetch(`${WEBUI_API_BASE_URL}/memories/${id}`, {
		method: 'DELETE',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			authorization: `Bearer ${token}`
		}
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.then((json) => {
			return json;
		})
		.catch((err) => {
			error = err.detail;

			console.log(err);
			return null;
		});

	if (error) {
		throw error;
	}

	return res;
};

export const deleteMemoriesByUserId = async (token: string) => {
	let error = null;

	const res = await fetch(`${WEBUI_API_BASE_URL}/memories/user`, {
		method: 'DELETE',
		headers: {
			Accept: 'application/json',
			'Content-Type': 'application/json',
			authorization: `Bearer ${token}`
		}
	})
		.then(async (res) => {
			if (!res.ok) throw await res.json();
			return res.json();
		})
		.then((json) => {
			return json;
		})
		.catch((err) => {
			error = err.detail;

			console.log(err);
			return null;
		});

	if (error) {
		throw error;
	}

	return res;
};
@@ -644,7 +644,7 @@
	}}
/>
<form
-	dir={$settings?.chatDirection}
+	dir={$settings?.chatDirection ?? 'LTR'}
	class=" flex flex-col relative w-full rounded-3xl px-1.5 bg-gray-50 dark:bg-gray-850 dark:text-gray-100"
	on:submit|preventDefault={() => {
		submitPrompt(prompt, user);
@@ -361,13 +361,14 @@
					history: history
				});

-				const element = document.getElementById('messages-container');
-				autoScroll =
-					element.scrollHeight - element.scrollTop <= element.clientHeight + 50;
-
-				setTimeout(() => {
-					scrollToBottom();
-				}, 100);
+				if (autoScroll) {
+					const element = document.getElementById('messages-container');
+					autoScroll =
+						element.scrollHeight - element.scrollTop <= element.clientHeight + 50;
+					setTimeout(() => {
+						scrollToBottom();
+					}, 100);
+				}
			}}
		/>
	{/key}
@ -30,70 +30,17 @@
|
||||
};
|
||||
|
||||
const checkPythonCode = (str) => {
|
||||
// Check if the string contains typical Python keywords, syntax, or functions
|
||||
const pythonKeywords = [
|
||||
'def',
|
||||
'class',
|
||||
'import',
|
||||
'from',
|
||||
'if',
|
||||
'else',
|
||||
'elif',
|
||||
'for',
|
||||
'while',
|
||||
'try',
|
||||
'except',
|
||||
'finally',
|
||||
'return',
|
||||
'yield',
|
||||
'lambda',
|
||||
'assert',
|
||||
'pass',
|
||||
'break',
|
||||
'continue',
|
||||
'global',
|
||||
'nonlocal',
|
||||
'del',
|
||||
'True',
|
||||
'False',
|
||||
'None',
|
||||
'and',
|
||||
'or',
|
||||
'not',
|
||||
'in',
|
||||
'is',
|
||||
'as',
|
||||
'with'
|
||||
];
|
||||
|
||||
for (let keyword of pythonKeywords) {
|
||||
if (str.includes(keyword)) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the string contains typical Python syntax characters
|
||||
const pythonSyntax = [
|
||||
'def ',
|
||||
'class ',
|
||||
'import ',
|
||||
'from ',
|
||||
'if ',
|
||||
'else:',
|
||||
'elif ',
|
||||
'for ',
|
||||
'while ',
|
||||
'try:',
|
||||
'except:',
|
||||
'finally:',
|
||||
'return ',
|
||||
'yield ',
|
||||
'lambda ',
|
||||
'assert ',
|
||||
'pass',
|
||||
'break',
|
||||
'continue',
|
||||
'global ',
|
||||
'nonlocal ',
|
||||
'del ',
|
||||
'True',
|
||||
@ -104,29 +51,7 @@
|
||||
' not ',
|
||||
' in ',
|
||||
' is ',
|
||||
' as ',
|
||||
' with ',
|
||||
':',
|
||||
'=',
|
||||
'==',
|
||||
'!=',
|
||||
'>',
|
||||
'<',
|
||||
'>=',
|
||||
'<=',
|
||||
'+',
|
||||
'-',
|
||||
'*',
|
||||
'/',
|
||||
'%',
|
||||
'**',
|
||||
'//',
|
||||
'(',
|
||||
')',
|
||||
'[',
|
||||
']',
|
||||
'{',
|
||||
'}'
|
||||
' with '
|
||||
];
|
||||
|
||||
for (let syntax of pythonSyntax) {
|
||||
@@ -186,7 +111,8 @@
				code.includes('matplotlib') ? 'matplotlib' : null,
				code.includes('sklearn') ? 'scikit-learn' : null,
				code.includes('scipy') ? 'scipy' : null,
-				code.includes('re') ? 'regex' : null
+				code.includes('re') ? 'regex' : null,
+				code.includes('seaborn') ? 'seaborn' : null
			].filter(Boolean);

			console.log(packages);

@@ -235,7 +161,8 @@ __builtins__.input = input`);
				code.includes('pandas') ? 'pandas' : null,
				code.includes('sklearn') ? 'scikit-learn' : null,
				code.includes('scipy') ? 'scipy' : null,
-				code.includes('re') ? 'regex' : null
+				code.includes('re') ? 'regex' : null,
+				code.includes('seaborn') ? 'seaborn' : null
			].filter(Boolean);

			console.log(packages);
@@ -68,11 +68,11 @@
			<!-- svelte-ignore a11y-click-events-have-key-events -->

			<div
-				class=" snap-center min-w-80 w-full max-w-full m-1 outline outline-1 {history.messages[
+				class=" snap-center min-w-80 w-full max-w-full m-1 border {history.messages[
					currentMessageId
				].model === model
-					? 'outline-gray-200 dark:outline-gray-700 outline-2'
-					: 'outline-gray-100 dark:outline-gray-850 '} transition p-6 rounded-3xl"
+					? 'border-gray-100 dark:border-gray-700 border-[1.5px]'
+					: 'border-gray-50 dark:border-gray-850 '} transition p-5 rounded-3xl"
				on:click={() => {
					currentMessageId = groupedMessages[model].messages[groupedMessagesIdx[model]].id;
@@ -5,7 +5,7 @@
	export let src = '/user.png';
</script>

-<div class={$settings?.chatDirection === 'LTR' ? 'mr-3' : 'ml-3'}>
+<div class={($settings?.chatDirection ?? 'LTR') === 'LTR' ? 'mr-3' : 'ml-3'}>
	<img
		crossorigin="anonymous"
		src={src.startsWith(WEBUI_BASE_URL) ||
@@ -35,7 +35,7 @@
		[key: string]: any;
	} = [];

-	export let className = ' w-[30rem]';
+	export let className = 'w-[30rem]';

	let show = false;

@@ -213,7 +213,9 @@
	</DropdownMenu.Trigger>

	<DropdownMenu.Content
-		class=" z-40 {className} max-w-[calc(100vw-1rem)] justify-start rounded-xl bg-white dark:bg-gray-850 dark:text-white shadow-lg border border-gray-300/30 dark:border-gray-700/50 outline-none "
+		class=" z-40 {$mobile
+			? `w-full`
+			: `${className}`} max-w-[calc(100vw-1rem)] justify-start rounded-xl bg-white dark:bg-gray-850 dark:text-white shadow-lg border border-gray-300/30 dark:border-gray-700/50 outline-none "
		transition={flyAndScale}
		side={$mobile ? 'bottom' : 'bottom-start'}
		sideOffset={4}
@@ -16,11 +16,11 @@
	let showManageModal = false;

	// Addons
-	let enableMemory = true;
+	let enableMemory = false;

	onMount(async () => {
		let settings = JSON.parse(localStorage.getItem('settings') ?? '{}');
-		enableMemory = settings?.memory ?? true;
+		enableMemory = settings?.memory ?? false;
	});
</script>

@@ -58,8 +58,8 @@

	<div class="text-xs text-gray-600 dark:text-gray-400">
		<div>
-			LLMs will become more helpful as you chat, picking up on details and preferences to tailor
-			its responses to you.
+			You can personalize your interactions with LLMs by adding memories through the 'Manage'
+			button below, making them more helpful and tailored to you.
		</div>

		<!-- <div class="mt-3">
@@ -1,17 +1,37 @@
<script>
-	import { getContext } from 'svelte';
+	import { createEventDispatcher, getContext } from 'svelte';

	import Modal from '$lib/components/common/Modal.svelte';
+	import { addNewMemory } from '$lib/apis/memories';
+	import { toast } from 'svelte-sonner';
+
+	const dispatch = createEventDispatcher();

	export let show;

	const i18n = getContext('i18n');

	let loading = false;
-	let memory = '';
+	let content = '';

-	const submitHandler = () => {
-		console.log('submitHandler');
+	const submitHandler = async () => {
+		loading = true;
+
+		const res = await addNewMemory(localStorage.token, content).catch((error) => {
+			toast.error(error);
+
+			return null;
+		});
+
+		if (res) {
+			console.log(res);
+			toast.success('Memory added successfully');
+			content = '';
+			show = false;
+			dispatch('save');
+		}
+
+		loading = false;
	};
</script>

@@ -48,7 +68,7 @@
		>
			<div class="">
				<textarea
-					bind:value={memory}
+					bind:value={content}
					class=" bg-transparent w-full text-sm resize-none rounded-xl p-3 outline outline-1 outline-gray-100 dark:outline-gray-800"
					rows="3"
					placeholder={$i18n.t('Enter a detail about yourself for your LLMs to recall')}
@ -7,6 +7,9 @@
|
||||
|
||||
import Modal from '$lib/components/common/Modal.svelte';
|
||||
import AddMemoryModal from './AddMemoryModal.svelte';
|
||||
import { deleteMemoriesByUserId, deleteMemoryById, getMemories } from '$lib/apis/memories';
|
||||
import Tooltip from '$lib/components/common/Tooltip.svelte';
|
||||
import { error } from '@sveltejs/kit';
|
||||
|
||||
const i18n = getContext('i18n');
|
||||
|
||||
@ -18,7 +21,7 @@
|
||||
|
||||
$: if (show) {
|
||||
(async () => {
|
||||
// chats = await getArchivedChatList(localStorage.token);
|
||||
memories = await getMemories(localStorage.token);
|
||||
})();
|
||||
}
|
||||
</script>
|
||||
@ -65,20 +68,54 @@
|
||||
</thead>
|
||||
<tbody>
|
||||
{#each memories as memory}
|
||||
<tr class="border-b dark:border-gray-800">
|
||||
<td class="px-3 py-2"> {memory.name} </td>
|
||||
<td class="px-3 py-2 hidden md:flex">
|
||||
{dayjs(memory.created_at).format($i18n.t('MMMM DD, YYYY'))}
|
||||
<tr class="border-b dark:border-gray-800 items-center">
|
||||
<td class="px-3 py-1">
|
||||
<div class="line-clamp-1">
|
||||
{memory.content}
|
||||
</div>
|
||||
</td>
|
||||
<td class="px-3 py-2 text-right">
|
||||
<button
|
||||
class="text-xs text-gray-500 dark:text-gray-400"
|
||||
on:click={() => {
|
||||
// showMemory(memory);
|
||||
}}
|
||||
>
|
||||
{$i18n.t('View')}
|
||||
</button>
|
||||
<td class=" px-3 py-1 hidden md:flex h-[2.5rem]">
|
||||
<div class="my-auto whitespace-nowrap">
|
||||
{dayjs(memory.created_at * 1000).format($i18n.t('MMMM DD, YYYY'))}
|
||||
</div>
|
||||
</td>
|
||||
<td class="px-3 py-1">
|
||||
<div class="flex justify-end w-full">
|
||||
<Tooltip content="Delete">
|
||||
<button
|
||||
class="self-center w-fit text-sm px-2 py-2 hover:bg-black/5 dark:hover:bg-white/5 rounded-xl"
|
||||
on:click={async () => {
|
||||
const res = await deleteMemoryById(
|
||||
localStorage.token,
|
||||
memory.id
|
||||
).catch((error) => {
|
||||
toast.error(error);
|
||||
return null;
|
||||
});
|
||||
|
||||
if (res) {
|
||||
toast.success('Memory deleted successfully');
|
||||
memories = await getMemories(localStorage.token);
|
||||
}
|
||||
}}
|
||||
>
|
||||
<svg
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
fill="none"
|
||||
viewBox="0 0 24 24"
|
||||
stroke-width="1.5"
|
||||
stroke="currentColor"
|
||||
class="w-4 h-4"
|
||||
>
|
||||
<path
|
||||
stroke-linecap="round"
|
||||
stroke-linejoin="round"
|
||||
d="m14.74 9-.346 9m-4.788 0L9.26 9m9.968-3.21c.342.052.682.107 1.022.166m-1.022-.165L18.16 19.673a2.25 2.25 0 0 1-2.244 2.077H8.084a2.25 2.25 0 0 1-2.244-2.077L4.772 5.79m14.456 0a48.108 48.108 0 0 0-3.478-.397m-12 .562c.34-.059.68-.114 1.022-.165m0 0a48.11 48.11 0 0 1 3.478-.397m7.5 0v-.916c0-1.18-.91-2.164-2.09-2.201a51.964 51.964 0 0 0-3.32 0c-1.18.037-2.09 1.022-2.09 2.201v.916m7.5 0a48.667 48.667 0 0 0-7.5 0"
|
||||
/>
|
||||
</svg>
|
||||
</button>
|
||||
</Tooltip>
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
{/each}
|
||||
@ -89,9 +126,7 @@
|
||||
{:else}
|
||||
<div class="text-center flex h-full text-sm w-full">
|
||||
<div class=" my-auto pb-10 px-4 w-full text-gray-500">
|
||||
{$i18n.t(
|
||||
'As you chat with LLMs, the details and preferences it remembers will be shown here.'
|
||||
)}
|
||||
{$i18n.t('Memories accessible by LLMs will be shown here.')}
|
||||
</div>
|
||||
</div>
|
||||
{/if}
|
||||
@ -103,13 +138,28 @@
|
||||
showAddMemoryModal = true;
|
||||
}}>Add memory</button
|
||||
>
|
||||
<!-- <button
|
||||
<button
|
||||
class=" px-3.5 py-1.5 font-medium text-red-500 hover:bg-black/5 dark:hover:bg-white/5 outline outline-1 outline-red-300 dark:outline-red-800 rounded-3xl"
|
||||
>Clear memory</button
|
||||
> -->
|
||||
on:click={async () => {
|
||||
const res = await deleteMemoriesByUserId(localStorage.token).catch((error) => {
|
||||
toast.error(error);
|
||||
return null;
|
||||
});
|
||||
|
||||
if (res) {
|
||||
toast.success('Memory cleared successfully');
|
||||
memories = [];
|
||||
}
|
||||
}}>Clear memory</button
|
||||
>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</Modal>
|
||||
|
||||
<AddMemoryModal bind:show={showAddMemoryModal} />
|
||||
<AddMemoryModal
|
||||
bind:show={showAddMemoryModal}
|
||||
on:save={async () => {
|
||||
memories = await getMemories(localStorage.token);
|
||||
}}
|
||||
/>
|
||||
|
@@ -254,6 +254,8 @@
							embeddingModel = '';
						} else if (e.target.value === 'openai') {
							embeddingModel = 'text-embedding-3-small';
+						} else if (e.target.value === '') {
+							embeddingModel = 'sentence-transformers/all-MiniLM-L6-v2';
						}
					}}
				>
@@ -329,7 +329,6 @@
					info: model
				}))}
				bind:value={selectedModelId}
-				className="w-[42rem]"
			/>
		</div>
	</div>
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "الأرشيف المحادثات",
|
||||
"are allowed - Activate this command by typing": "مسموح - قم بتنشيط هذا الأمر عن طريق الكتابة",
|
||||
"Are you sure?": "هل أنت متأكد ؟",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "أرفق ملف",
|
||||
"Attention to detail": "انتبه للتفاصيل",
|
||||
"Audio": "صوتي",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "استيراد ملفات النماذج",
|
||||
"Import Prompts": "مطالبات الاستيراد",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "قم بتضمين علامة `-api` عند تشغيل Stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "إدخال الأوامر",
|
||||
"Interface": "واجهه المستخدم",
|
||||
"Invalid Tag": "تاق غير صالحة",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Max Tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "يمكن تنزيل 3 نماذج كحد أقصى في وقت واحد. الرجاء معاودة المحاولة في وقت لاحق.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "الحد الأدنى من النقاط",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "المتغير",
|
||||
"variable to have them replaced with clipboard content.": "متغير لاستبدالها بمحتوى الحافظة.",
|
||||
"Version": "إصدار",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "تحذير: إذا قمت بتحديث أو تغيير نموذج التضمين الخاص بك، فستحتاج إلى إعادة استيراد كافة المستندات.",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Добавяне на кратко описание за това какво прави този модфайл",
|
||||
"Add a short title for this prompt": "Добавяне на кратко заглавие за този промпт",
|
||||
"Add a tag": "Добавяне на таг",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Добавяне на собствен промпт",
|
||||
"Add Docs": "Добавяне на Документи",
|
||||
"Add Files": "Добавяне на Файлове",
|
||||
"Add Memory": "",
|
||||
@ -51,8 +51,7 @@
|
||||
"Archived Chats": "",
|
||||
"are allowed - Activate this command by typing": "са разрешени - Активирайте тази команда чрез въвеждане",
|
||||
"Are you sure?": "Сигурни ли сте?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attach file": "Прикачване на файл",
|
||||
"Attention to detail": "",
|
||||
"Audio": "Аудио",
|
||||
"August": "",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Импортване на модфайлове",
|
||||
"Import Prompts": "Импортване на промптове",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Включете флага `--api`, когато стартирате stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Въведете команди",
|
||||
"Interface": "Интерфейс",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Max Tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Максимум 3 модели могат да бъдат сваляни едновременно. Моля, опитайте отново по-късно.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Изберете режим",
|
||||
"Select a model": "Изберете модел",
|
||||
"Select an Ollama instance": "Изберете Ollama инстанция",
|
||||
"Select model": "",
|
||||
"Select model": "Изберете модел",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Изпращане на Съобщение",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "променлива",
|
||||
"variable to have them replaced with clipboard content.": "променливи да се заменят съдържанието от клипборд.",
|
||||
"Version": "Версия",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Уеб",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "এই মডেলফাইলটির সম্পর্কে সংক্ষিপ্ত বিবরণ যোগ করুন",
|
||||
"Add a short title for this prompt": "এই প্রম্পটের জন্য একটি সংক্ষিপ্ত টাইটেল যোগ করুন",
|
||||
"Add a tag": "একটি ট্যাগ যোগ করুন",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "একটি কাস্টম প্রম্পট যোগ করুন",
|
||||
"Add Docs": "ডকুমেন্ট যোগ করুন",
|
||||
"Add Files": "ফাইল যোগ করুন",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "চ্যাট ইতিহাস সংরক্ষণাগার",
|
||||
"are allowed - Activate this command by typing": "অনুমোদিত - কমান্ডটি চালু করার জন্য লিখুন",
|
||||
"Are you sure?": "আপনি নিশ্চিত?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "ফাইল যুক্ত করুন",
|
||||
"Attention to detail": "বিস্তারিত বিশেষতা",
|
||||
"Audio": "অডিও",
|
||||
"August": "",
|
||||
"Auto-playback response": "রেসপন্স অটো-প্লেব্যাক",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "মডেলফাইলগুলো ইমপোর্ট করুন",
|
||||
"Import Prompts": "প্রম্পটগুলো ইমপোর্ট করুন",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui চালু করার সময় `--api` ফ্ল্যাগ সংযুক্ত করুন",
|
||||
"Input commands": "",
|
||||
"Input commands": "ইনপুট কমান্ডস",
|
||||
"Interface": "ইন্টারফেস",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "সর্বোচ্চ টোকন",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "একসঙ্গে সর্বোচ্চ তিনটি মডেল ডাউনলোড করা যায়। দয়া করে পরে আবার চেষ্টা করুন।",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "একটি মডেল নির্বাচন করুন",
|
||||
"Select a model": "একটি মডেল নির্বাচন করুন",
|
||||
"Select an Ollama instance": "একটি Ollama ইন্সট্যান্স নির্বাচন করুন",
|
||||
"Select model": "",
|
||||
"Select model": "মডেল নির্বাচন করুন",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "একটি মেসেজ পাঠান",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "ভেরিয়েবল",
|
||||
"variable to have them replaced with clipboard content.": "ক্লিপবোর্ডের কন্টেন্ট দিয়ে যেই ভেরিয়েবল রিপ্লেস করা যাবে।",
|
||||
"Version": "ভার্সন",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "ওয়েব",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Afegeix una descripció curta del que fa aquest arxiu de model",
|
||||
"Add a short title for this prompt": "Afegeix un títol curt per aquest prompt",
|
||||
"Add a tag": "Afegeix una etiqueta",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Afegir un prompt personalitzat",
|
||||
"Add Docs": "Afegeix Documents",
|
||||
"Add Files": "Afegeix Arxius",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "Arxiu d'historial de xat",
|
||||
"are allowed - Activate this command by typing": "estan permesos - Activa aquesta comanda escrivint",
|
||||
"Are you sure?": "Estàs segur?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "Adjuntar arxiu",
|
||||
"Attention to detail": "Detall atent",
|
||||
"Audio": "Àudio",
|
||||
"August": "",
|
||||
"Auto-playback response": "Resposta de reproducció automàtica",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Importa Fitxers de Model",
|
||||
"Import Prompts": "Importa Prompts",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Inclou la bandera `--api` quan executis stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Entra ordres",
|
||||
"Interface": "Interfície",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Màxim de Tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Es poden descarregar un màxim de 3 models simultàniament. Si us plau, prova-ho més tard.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Selecciona un mode",
|
||||
"Select a model": "Selecciona un model",
|
||||
"Select an Ollama instance": "Selecciona una instància d'Ollama",
|
||||
"Select model": "",
|
||||
"Select model": "Selecciona un model",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Envia un Missatge",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "variable",
|
||||
"variable to have them replaced with clipboard content.": "variable per tenir-les reemplaçades amb el contingut del porta-retalls.",
|
||||
"Version": "Versió",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "Archivierte Chats",
|
||||
"are allowed - Activate this command by typing": "sind erlaubt - Aktiviere diesen Befehl, indem du",
|
||||
"Are you sure?": "Bist du sicher?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "Datei anhängen",
|
||||
"Attention to detail": "Auge fürs Detail",
|
||||
"Audio": "Audio",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Maximale Tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Es können maximal 3 Modelle gleichzeitig heruntergeladen werden. Bitte versuche es später erneut.",
|
||||
"May": "Mai",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "Mindestscore",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Einen Modus auswählen",
|
||||
"Select a model": "Ein Modell auswählen",
|
||||
"Select an Ollama instance": "Eine Ollama Instanz auswählen",
|
||||
"Select model": "",
|
||||
"Select model": "Modell auswählen",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Eine Nachricht senden",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "Variable",
|
||||
"variable to have them replaced with clipboard content.": "Variable, um den Inhalt der Zwischenablage beim Nutzen des Prompts zu ersetzen.",
|
||||
"Version": "Version",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Warnung: Wenn du dein Einbettungsmodell aktualisierst oder änderst, musst du alle Dokumente erneut importieren.",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -51,8 +51,7 @@
|
||||
"Archived Chats": "",
|
||||
"are allowed - Activate this command by typing": "are allowed. Activate typing",
|
||||
"Are you sure?": "Such certainty?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attach file": "Attach file",
|
||||
"Attention to detail": "",
|
||||
"Audio": "Audio",
|
||||
"August": "",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Import Modelfiles",
|
||||
"Import Prompts": "Import Promptos",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Include `--api` flag when running stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Input commands",
|
||||
"Interface": "Interface",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Max Tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maximum of 3 models can be downloaded simultaneously. Please try again later.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Select a mode very choose",
|
||||
"Select a model": "Select a model much choice",
|
||||
"Select an Ollama instance": "Select an Ollama instance very choose",
|
||||
"Select model": "",
|
||||
"Select model": "Select model much choice",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Send a Message much message",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "variable very variable",
|
||||
"variable to have them replaced with clipboard content.": "variable to have them replaced with clipboard content. Very replace.",
|
||||
"Version": "Version much version",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Web very web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "",
|
||||
"are allowed - Activate this command by typing": "",
|
||||
"Are you sure?": "",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Audio": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "",
|
||||
"variable to have them replaced with clipboard content.": "",
|
||||
"Version": "",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "",
|
||||
"are allowed - Activate this command by typing": "",
|
||||
"Are you sure?": "",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Audio": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "",
|
||||
"variable to have them replaced with clipboard content.": "",
|
||||
"Version": "",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Agregue una descripción corta de lo que este modelfile hace",
|
||||
"Add a short title for this prompt": "Agregue un título corto para este Prompt",
|
||||
"Add a tag": "Agregar una etiqueta",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Agregar un prompt personalizado",
|
||||
"Add Docs": "Agregar Documentos",
|
||||
"Add Files": "Agregar Archivos",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "Chats archivados",
|
||||
"are allowed - Activate this command by typing": "están permitidos - Active este comando escribiendo",
|
||||
"Are you sure?": "¿Está seguro?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "Adjuntar archivo",
|
||||
"Attention to detail": "Detalle preciso",
|
||||
"Audio": "Audio",
|
||||
"August": "",
|
||||
"Auto-playback response": "Respuesta de reproducción automática",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Importar Modelfiles",
|
||||
"Import Prompts": "Importar Prompts",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Incluir el indicador `--api` al ejecutar stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Ingresar comandos",
|
||||
"Interface": "Interfaz",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Máximo de Tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Se pueden descargar un máximo de 3 modelos simultáneamente. Por favor, inténtelo de nuevo más tarde.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Selecciona un modo",
|
||||
"Select a model": "Selecciona un modelo",
|
||||
"Select an Ollama instance": "Seleccione una instancia de Ollama",
|
||||
"Select model": "",
|
||||
"Select model": "Selecciona un modelo",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Enviar un Mensaje",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "variable",
|
||||
"variable to have them replaced with clipboard content.": "variable para reemplazarlos con el contenido del portapapeles.",
|
||||
"Version": "Versión",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "توضیح کوتاهی در مورد کاری که این فایل\u200cمدل انجام می دهد اضافه کنید",
|
||||
"Add a short title for this prompt": "یک عنوان کوتاه برای این درخواست اضافه کنید",
|
||||
"Add a tag": "اضافه کردن یک تگ",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "اضافه کردن یک درخواست سفارشی",
|
||||
"Add Docs": "اضافه کردن اسناد",
|
||||
"Add Files": "اضافه کردن فایل\u200cها",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "آرشیو تاریخچه چت",
|
||||
"are allowed - Activate this command by typing": "مجاز هستند - این دستور را با تایپ کردن این فعال کنید:",
|
||||
"Are you sure?": "آیا مطمئن هستید؟",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "پیوست فایل",
|
||||
"Attention to detail": "دقیق",
|
||||
"Audio": "صدا",
|
||||
"August": "",
|
||||
"Auto-playback response": "پخش خودکار پاسخ ",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "ایمپورت فایل\u200cهای مدل",
|
||||
"Import Prompts": "ایمپورت پرامپت\u200cها",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "فلگ `--api` را هنکام اجرای stable-diffusion-webui استفاده کنید.",
|
||||
"Input commands": "",
|
||||
"Input commands": "ورودی دستورات",
|
||||
"Interface": "رابط",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "حداکثر توکن",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "حداکثر 3 مدل را می توان به طور همزمان دانلود کرد. لطفاً بعداً دوباره امتحان کنید.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "یک حالت انتخاب کنید",
|
||||
"Select a model": "انتخاب یک مدل",
|
||||
"Select an Ollama instance": "انتخاب یک نمونه از اولاما",
|
||||
"Select model": "",
|
||||
"Select model": "انتخاب یک مدل",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "ارسال یک پیام",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "متغیر",
|
||||
"variable to have them replaced with clipboard content.": "متغیر برای جایگزینی آنها با محتوای کلیپ بورد.",
|
||||
"Version": "نسخه",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "وب",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "Arkistoidut keskustelut",
|
||||
"are allowed - Activate this command by typing": "ovat sallittuja - Aktivoi tämä komento kirjoittamalla",
|
||||
"Are you sure?": "Oletko varma?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "Liitä tiedosto",
|
||||
"Attention to detail": "Huomio yksityiskohtiin",
|
||||
"Audio": "Ääni",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Maksimitokenit",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Enintään 3 mallia voidaan ladata samanaikaisesti. Yritä myöhemmin uudelleen.",
|
||||
"May": "toukokuu",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "Vähimmäispisteet",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "muuttuja",
|
||||
"variable to have them replaced with clipboard content.": "muuttuja korvataan leikepöydän sisällöllä.",
|
||||
"Version": "Versio",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Varoitus: Jos päivität tai vaihdat upotusmallia, sinun on tuotava kaikki asiakirjat uudelleen.",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Ajouter une courte description de ce que fait ce fichier de modèle",
|
||||
"Add a short title for this prompt": "Ajouter un court titre pour ce prompt",
|
||||
"Add a tag": "Ajouter un tag",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Ajouter un prompt personnalisé",
|
||||
"Add Docs": "Ajouter des documents",
|
||||
"Add Files": "Ajouter des fichiers",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "enregistrement du chat",
|
||||
"are allowed - Activate this command by typing": "sont autorisés - Activez cette commande en tapant",
|
||||
"Are you sure?": "Êtes-vous sûr ?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "Joindre un fichier",
|
||||
"Attention to detail": "Attention aux détails",
|
||||
"Audio": "Audio",
|
||||
"August": "",
|
||||
"Auto-playback response": "Réponse en lecture automatique",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Importer les fichiers de modèle",
|
||||
"Import Prompts": "Importer les prompts",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Inclure l'indicateur `--api` lors de l'exécution de stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Entrez des commandes d'entrée",
|
||||
"Interface": "Interface",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Tokens maximaux",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Un maximum de 3 modèles peut être téléchargé simultanément. Veuillez réessayer plus tard.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Sélectionnez un mode",
|
||||
"Select a model": "Sélectionnez un modèle",
|
||||
"Select an Ollama instance": "Sélectionner une instance Ollama",
|
||||
"Select model": "",
|
||||
"Select model": "Sélectionnez un modèle",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Envoyer un message",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "variable",
|
||||
"variable to have them replaced with clipboard content.": "variable pour les remplacer par le contenu du presse-papiers.",
|
||||
"Version": "Version",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Ajouter une courte description de ce que fait ce fichier de modèle",
|
||||
"Add a short title for this prompt": "Ajouter un court titre pour ce prompt",
|
||||
"Add a tag": "Ajouter un tag",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Ajouter un prompt personnalisé",
|
||||
"Add Docs": "Ajouter des documents",
|
||||
"Add Files": "Ajouter des fichiers",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "enregistrement du chat",
|
||||
"are allowed - Activate this command by typing": "sont autorisés - Activez cette commande en tapant",
|
||||
"Are you sure?": "Êtes-vous sûr ?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "Joindre un fichier",
|
||||
"Attention to detail": "Attention aux détails",
|
||||
"Audio": "Audio",
|
||||
"August": "",
|
||||
"Auto-playback response": "Réponse en lecture automatique",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Importer les fichiers de modèle",
|
||||
"Import Prompts": "Importer les prompts",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Inclure le drapeau `--api` lors de l'exécution de stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Entrez les commandes d'entrée",
|
||||
"Interface": "Interface",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Tokens maximaux",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Un maximum de 3 modèles peut être téléchargé simultanément. Veuillez réessayer plus tard.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Sélectionnez un mode",
|
||||
"Select a model": "Sélectionner un modèle",
|
||||
"Select an Ollama instance": "Sélectionner une instance Ollama",
|
||||
"Select model": "",
|
||||
"Select model": "Sélectionner un modèle",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Envoyer un message",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "variable",
|
||||
"variable to have them replaced with clipboard content.": "variable pour les remplacer par le contenu du presse-papiers.",
|
||||
"Version": "Version",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "צ'אטים מאורכבים",
|
||||
"are allowed - Activate this command by typing": "מותרים - הפעל פקודה זו על ידי הקלדה",
|
||||
"Are you sure?": "האם אתה בטוח?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "צרף קובץ",
|
||||
"Attention to detail": "תשומת לב לפרטים",
|
||||
"Audio": "אודיו",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "מקסימום טוקנים",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "ניתן להוריד מקסימום 3 מודלים בו זמנית. אנא נסה שוב מאוחר יותר.",
|
||||
"May": "מאי",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "ציון מינימלי",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "",
|
||||
"variable to have them replaced with clipboard content.": "",
|
||||
"Version": "גרסה",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "רשת",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "यह मॉडलफ़ाइल क्या करती है इसके बारे में एक संक्षिप्त विवरण जोड़ें",
|
||||
"Add a short title for this prompt": "इस संकेत के लिए एक संक्षिप्त शीर्षक जोड़ें",
|
||||
"Add a tag": "एक टैग जोड़े",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "अनुकूल संकेत जोड़ें",
|
||||
"Add Docs": "दस्तावेज़ जोड़ें",
|
||||
"Add Files": "फाइलें जोड़ें",
|
||||
"Add Memory": "",
|
||||
@ -51,8 +51,7 @@
|
||||
"Archived Chats": "संग्रहीत चैट",
|
||||
"are allowed - Activate this command by typing": "अनुमति है - टाइप करके इस कमांड को सक्रिय करें",
|
||||
"Are you sure?": "क्या आपको यकीन है?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attach file": "फ़ाइल atta",
|
||||
"Attention to detail": "विस्तार पर ध्यान",
|
||||
"Audio": "ऑडियो",
|
||||
"August": "",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "मॉडल फ़ाइलें आयात करें",
|
||||
"Import Prompts": "प्रॉम्प्ट आयात करें",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "stable-diffusion-webui चलाते समय `--api` ध्वज शामिल करें",
|
||||
"Input commands": "",
|
||||
"Input commands": "इनपुट क命",
|
||||
"Interface": "इंटरफेस",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "अधिकतम टोकन",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "अधिकतम 3 मॉडल एक साथ डाउनलोड किये जा सकते हैं। कृपया बाद में पुन: प्रयास करें।",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "न्यूनतम स्कोर",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "एक मोड चुनें",
|
||||
"Select a model": "एक मॉडल चुनें",
|
||||
"Select an Ollama instance": "एक Ollama Instance चुनें",
|
||||
"Select model": "",
|
||||
"Select model": "मॉडल चुनें",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "एक संदेश भेजो",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "",
|
||||
"variable to have them replaced with clipboard content.": "उन्हें क्लिपबोर्ड सामग्री से बदलने के लिए वेरिएबल।",
|
||||
"Version": "संस्करण",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "चेतावनी: यदि आप अपने एम्बेडिंग मॉडल को अपडेट या बदलते हैं, तो आपको सभी दस्तावेज़ों को फिर से आयात करने की आवश्यकता होगी।",
|
||||
"Web": "वेब",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "Arhivirani razgovori",
|
||||
"are allowed - Activate this command by typing": "su dopušteni - Aktivirajte ovu naredbu upisivanjem",
|
||||
"Are you sure?": "Jeste li sigurni?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "Priloži datoteku",
|
||||
"Attention to detail": "Pažnja na detalje",
|
||||
"Audio": "Audio",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Maksimalni tokeni",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maksimalno 3 modela mogu se preuzeti istovremeno. Pokušajte ponovo kasnije.",
|
||||
"May": "Svibanj",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "Minimalna ocjena",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "varijabla",
|
||||
"variable to have them replaced with clipboard content.": "varijabla za zamjenu sadržajem međuspremnika.",
|
||||
"Version": "Verzija",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Upozorenje: Ako ažurirate ili promijenite svoj model za umetanje, morat ćete ponovno uvesti sve dokumente.",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "Postavke web učitavanja",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "Chat archiviate",
|
||||
"are allowed - Activate this command by typing": "sono consentiti - Attiva questo comando digitando",
|
||||
"Are you sure?": "Sei sicuro?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "Allega file",
|
||||
"Attention to detail": "Attenzione ai dettagli",
|
||||
"Audio": "Audio",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Max token",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "È possibile scaricare un massimo di 3 modelli contemporaneamente. Riprova più tardi.",
|
||||
"May": "Maggio",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "Punteggio minimo",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "variabile",
|
||||
"variable to have them replaced with clipboard content.": "variabile per farli sostituire con il contenuto degli appunti.",
|
||||
"Version": "Versione",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Attenzione: se aggiorni o cambi il tuo modello di embedding, dovrai reimportare tutti i documenti.",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "Impostazioni del caricatore Web",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "このモデルファイルの機能に関する簡単な説明を追加",
|
||||
"Add a short title for this prompt": "このプロンプトの短いタイトルを追加",
|
||||
"Add a tag": "タグを追加",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "カスタムプロンプトを追加",
|
||||
"Add Docs": "ドキュメントを追加",
|
||||
"Add Files": "ファイルを追加",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "チャット記録",
|
||||
"are allowed - Activate this command by typing": "が許可されています - 次のように入力してこのコマンドをアクティブ化します",
|
||||
"Are you sure?": "よろしいですか?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "ファイルを添付する",
|
||||
"Attention to detail": "詳細に注意する",
|
||||
"Audio": "オーディオ",
|
||||
"August": "",
|
||||
"Auto-playback response": "応答の自動再生",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "モデルファイルをインポート",
|
||||
"Import Prompts": "プロンプトをインポート",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "",
|
||||
"Input commands": "",
|
||||
"Input commands": "入力コマンド",
|
||||
"Interface": "インターフェース",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "最大トークン数",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "同時にダウンロードできるモデルは最大 3 つです。後でもう一度お試しください。",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "モードを選択",
|
||||
"Select a model": "モデルを選択",
|
||||
"Select an Ollama instance": "Ollama インスタンスを選択",
|
||||
"Select model": "",
|
||||
"Select model": "モデルを選択",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "メッセージを送信",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "変数",
|
||||
"variable to have them replaced with clipboard content.": "クリップボードの内容に置き換える変数。",
|
||||
"Version": "バージョン",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "ウェブ",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "დაამატე მოკლე აღწერა იმის შესახებ, თუ რას აკეთებს ეს მოდელური ფაილი",
|
||||
"Add a short title for this prompt": "დაამატე მოკლე სათაური ამ მოთხოვნისთვის",
|
||||
"Add a tag": "დაამატე ტეგი",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "პირველადი მოთხოვნის დამატება",
|
||||
"Add Docs": "დოკუმენტის დამატება",
|
||||
"Add Files": "ფაილების დამატება",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "ჩატის ისტორიის არქივი",
|
||||
"are allowed - Activate this command by typing": "დაშვებულია - ბრძანების გასააქტიურებლად აკრიფეთ:",
|
||||
"Are you sure?": "დარწმუნებული ხარ?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "ფაილის ჩაწერა",
|
||||
"Attention to detail": "დეტალური მიმართვა",
|
||||
"Audio": "ხმოვანი",
|
||||
"August": "",
|
||||
"Auto-playback response": "ავტომატური დაკვრის პასუხი",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "მოდელური ფაილების იმპორტი",
|
||||
"Import Prompts": "მოთხოვნების იმპორტი",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "ჩართეთ `--api` დროშა stable-diffusion-webui-ის გაშვებისას",
|
||||
"Input commands": "",
|
||||
"Input commands": "შეყვანით ბრძანებებს",
|
||||
"Interface": "ინტერფეისი",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "მაქსიმალური ტოკენები",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "მაქსიმუმ 3 მოდელის ჩამოტვირთვა შესაძლებელია ერთდროულად. Გთხოვთ სცადოთ მოგვიანებით.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "რეჟიმის არჩევა",
|
||||
"Select a model": "მოდელის არჩევა",
|
||||
"Select an Ollama instance": "",
|
||||
"Select model": "",
|
||||
"Select model": "მოდელის არჩევა",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "შეტყობინების გაგზავნა",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "ცვლადი",
|
||||
"variable to have them replaced with clipboard content.": "ცვლადი, რომ შეცვალოს ისინი ბუფერში შიგთავსით.",
|
||||
"Version": "ვერსია",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "ვები",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "이 모델파일이 하는 일에 대한 간단한 설명 추가",
|
||||
"Add a short title for this prompt": "이 프롬프트에 대한 간단한 제목 추가",
|
||||
"Add a tag": "태그 추가",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "프롬프트 추가",
|
||||
"Add Docs": "문서 추가",
|
||||
"Add Files": "파일 추가",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "채팅 기록 아카이브",
|
||||
"are allowed - Activate this command by typing": "허용됩니다 - 이 명령을 활성화하려면 입력하세요.",
|
||||
"Are you sure?": "확실합니까?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "파일 첨부",
|
||||
"Attention to detail": "상세한 주의",
|
||||
"Audio": "오디오",
|
||||
"August": "",
|
||||
"Auto-playback response": "응답 자동 재생",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "모델파일 가져오기",
|
||||
"Import Prompts": "프롬프트 가져오기",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "",
|
||||
"Input commands": "",
|
||||
"Input commands": "입력 명령",
|
||||
"Interface": "인터페이스",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "최대 토큰 수",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "최대 3개의 모델을 동시에 다운로드할 수 있습니다. 나중에 다시 시도하세요.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "모드 선택",
|
||||
"Select a model": "모델 선택",
|
||||
"Select an Ollama instance": "Ollama 인스턴스 선택",
|
||||
"Select model": "",
|
||||
"Select model": "모델 선택",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "메시지 보내기",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "변수",
|
||||
"variable to have them replaced with clipboard content.": "변수를 사용하여 클립보드 내용으로 바꾸세요.",
|
||||
"Version": "버전",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "웹",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Voeg een korte beschrijving toe over wat dit modelfile doet",
|
||||
"Add a short title for this prompt": "Voeg een korte titel toe voor deze prompt",
|
||||
"Add a tag": "Voeg een tag toe",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Voeg een aangepaste prompt toe",
|
||||
"Add Docs": "Voeg Docs toe",
|
||||
"Add Files": "Voege Bestanden toe",
|
||||
"Add Memory": "",
|
||||
@ -51,8 +51,7 @@
|
||||
"Archived Chats": "chatrecord",
|
||||
"are allowed - Activate this command by typing": "zijn toegestaan - Activeer deze commando door te typen",
|
||||
"Are you sure?": "Zeker weten?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attach file": "Voeg een bestand toe",
|
||||
"Attention to detail": "",
|
||||
"Audio": "Audio",
|
||||
"August": "",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Importeer Modelfiles",
|
||||
"Import Prompts": "Importeer Prompts",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Voeg `--api` vlag toe bij het uitvoeren van stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Voer commando's in",
|
||||
"Interface": "Interface",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Max Tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maximaal 3 modellen kunnen tegelijkertijd worden gedownload. Probeer het later opnieuw.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Selecteer een modus",
|
||||
"Select a model": "Selecteer een model",
|
||||
"Select an Ollama instance": "Selecteer een Ollama instantie",
|
||||
"Select model": "",
|
||||
"Select model": "Selecteer een model",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Stuur een Bericht",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "variabele",
|
||||
"variable to have them replaced with clipboard content.": "variabele om ze te laten vervangen door klembord inhoud.",
|
||||
"Version": "Versie",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "ਆਰਕਾਈਵ ਕੀਤੀਆਂ ਗੱਲਾਂ",
|
||||
"are allowed - Activate this command by typing": "ਅਨੁਮਤ ਹਨ - ਇਸ ਕਮਾਂਡ ਨੂੰ ਟਾਈਪ ਕਰਕੇ ਸਰਗਰਮ ਕਰੋ",
|
||||
"Are you sure?": "ਕੀ ਤੁਸੀਂ ਯਕੀਨਨ ਹੋ?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "ਫਾਈਲ ਜੋੜੋ",
|
||||
"Attention to detail": "ਵੇਰਵੇ 'ਤੇ ਧਿਆਨ",
|
||||
"Audio": "ਆਡੀਓ",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "ਅਧਿਕਤਮ ਟੋਕਨ",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "ਇੱਕ ਸਮੇਂ ਵਿੱਚ ਵੱਧ ਤੋਂ ਵੱਧ 3 ਮਾਡਲ ਡਾਊਨਲੋਡ ਕੀਤੇ ਜਾ ਸਕਦੇ ਹਨ। ਕਿਰਪਾ ਕਰਕੇ ਬਾਅਦ ਵਿੱਚ ਦੁਬਾਰਾ ਕੋਸ਼ਿਸ਼ ਕਰੋ।",
|
||||
"May": "ਮਈ",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "ਘੱਟੋ-ਘੱਟ ਸਕੋਰ",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "ਵੈਰੀਏਬਲ",
|
||||
"variable to have them replaced with clipboard content.": "ਕਲਿੱਪਬੋਰਡ ਸਮੱਗਰੀ ਨਾਲ ਬਦਲਣ ਲਈ ਵੈਰੀਏਬਲ।",
|
||||
"Version": "ਵਰਜਨ",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "ਚੇਤਾਵਨੀ: ਜੇ ਤੁਸੀਂ ਆਪਣਾ ਐਮਬੈੱਡਿੰਗ ਮਾਡਲ ਅੱਪਡੇਟ ਜਾਂ ਬਦਲਦੇ ਹੋ, ਤਾਂ ਤੁਹਾਨੂੰ ਸਾਰੇ ਡਾਕੂਮੈਂਟ ਮੁੜ ਆਯਾਤ ਕਰਨ ਦੀ ਲੋੜ ਹੋਵੇਗੀ।",
|
||||
"Web": "ਵੈਬ",
|
||||
"Web Loader Settings": "ਵੈਬ ਲੋਡਰ ਸੈਟਿੰਗਾਂ",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "Zarchiwizowane czaty",
|
||||
"are allowed - Activate this command by typing": "są dozwolone - Aktywuj to polecenie, wpisując",
|
||||
"Are you sure?": "Jesteś pewien?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "Dołącz plik",
|
||||
"Attention to detail": "Dbałość o szczegóły",
|
||||
"Audio": "Dźwięk",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Maksymalna liczba tokenów",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Maksymalnie 3 modele można pobierać jednocześnie. Spróbuj ponownie później.",
|
||||
"May": "Maj",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "Minimalny wynik",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "zmienna",
|
||||
"variable to have them replaced with clipboard content.": "zmienna która zostanie zastąpiona zawartością schowka.",
|
||||
"Version": "Wersja",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Uwaga: Jeśli uaktualnisz lub zmienisz model osadzania, będziesz musiał ponownie zaimportować wszystkie dokumenty.",
|
||||
"Web": "Sieć",
|
||||
"Web Loader Settings": "Ustawienia pobierania z sieci",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Adicione uma breve descrição sobre o que este arquivo de modelo faz",
|
||||
"Add a short title for this prompt": "Adicione um título curto para este prompt",
|
||||
"Add a tag": "Adicionar uma tag",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Adicionar prompt personalizado",
|
||||
"Add Docs": "Adicionar Documentos",
|
||||
"Add Files": "Adicionar Arquivos",
|
||||
"Add Memory": "",
|
||||
@ -51,8 +51,7 @@
|
||||
"Archived Chats": "Bate-papos arquivados",
|
||||
"are allowed - Activate this command by typing": "são permitidos - Ative este comando digitando",
|
||||
"Are you sure?": "Tem certeza?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attach file": "Anexar arquivo",
|
||||
"Attention to detail": "",
|
||||
"Audio": "Áudio",
|
||||
"August": "",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Importar Arquivos de Modelo",
|
||||
"Import Prompts": "Importar Prompts",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Inclua a flag `--api` ao executar stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Comandos de entrada",
|
||||
"Interface": "Interface",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Máximo de Tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Máximo de 3 modelos podem ser baixados simultaneamente. Tente novamente mais tarde.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Selecione um modo",
|
||||
"Select a model": "Selecione um modelo",
|
||||
"Select an Ollama instance": "Selecione uma instância Ollama",
|
||||
"Select model": "",
|
||||
"Select model": "Selecione um modelo",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Enviar uma Mensagem",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "variável",
|
||||
"variable to have them replaced with clipboard content.": "variável para que sejam substituídos pelo conteúdo da área de transferência.",
|
||||
"Version": "Versão",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Adicione uma breve descrição sobre o que este arquivo de modelo faz",
|
||||
"Add a short title for this prompt": "Adicione um título curto para este prompt",
|
||||
"Add a tag": "Adicionar uma tag",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Adicionar um prompt curto",
|
||||
"Add Docs": "Adicionar Documentos",
|
||||
"Add Files": "Adicionar Arquivos",
|
||||
"Add Memory": "",
|
||||
@ -51,8 +51,7 @@
|
||||
"Archived Chats": "Bate-papos arquivados",
|
||||
"are allowed - Activate this command by typing": "são permitidos - Ative este comando digitando",
|
||||
"Are you sure?": "Tem certeza?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attach file": "Anexar arquivo",
|
||||
"Attention to detail": "",
|
||||
"Audio": "Áudio",
|
||||
"August": "",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Importar Arquivos de Modelo",
|
||||
"Import Prompts": "Importar Prompts",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Inclua a flag `--api` ao executar stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Comandos de entrada",
|
||||
"Interface": "Interface",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Máximo de Tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Máximo de 3 modelos podem ser baixados simultaneamente. Tente novamente mais tarde.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Selecione um modo",
|
||||
"Select a model": "Selecione um modelo",
|
||||
"Select an Ollama instance": "Selecione uma instância Ollama",
|
||||
"Select model": "",
|
||||
"Select model": "Selecione um modelo",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Enviar uma Mensagem",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "variável",
|
||||
"variable to have them replaced with clipboard content.": "variável para que sejam substituídos pelo conteúdo da área de transferência.",
|
||||
"Version": "Versão",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Добавьте краткое описание, что делает этот моделфайл",
|
||||
"Add a short title for this prompt": "Добавьте краткий заголовок для этого ввода",
|
||||
"Add a tag": "Добавьте тэг",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Добавьте пользовательский ввод",
|
||||
"Add Docs": "Добавьте документы",
|
||||
"Add Files": "Добавьте файлы",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "запис на чат",
|
||||
"are allowed - Activate this command by typing": "разрешено - активируйте эту команду вводом",
|
||||
"Are you sure?": "Вы уверены?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "Прикрепить файл",
|
||||
"Attention to detail": "детализированный",
|
||||
"Audio": "Аудио",
|
||||
"August": "",
|
||||
"Auto-playback response": "Автоматическое воспроизведение ответа",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Импорт файлов модели",
|
||||
"Import Prompts": "Импорт подсказок",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Добавьте флаг `--api` при запуске stable-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Введите команды",
|
||||
"Interface": "Интерфейс",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Максимальное количество токенов",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Максимальное количество моделей для загрузки одновременно - 3. Пожалуйста, попробуйте позже.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Выберите режим",
|
||||
"Select a model": "Выберите модель",
|
||||
"Select an Ollama instance": "Выберите экземпляр Ollama",
|
||||
"Select model": "",
|
||||
"Select model": "Выберите модель",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Отправить сообщение",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "переменная",
|
||||
"variable to have them replaced with clipboard content.": "переменная, чтобы их заменить содержимым буфера обмена.",
|
||||
"Version": "Версия",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Веб",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "Архивирана ћаскања",
|
||||
"are allowed - Activate this command by typing": "су дозвољени - Покрените ову наредбу уношењем",
|
||||
"Are you sure?": "Да ли сте сигурни?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "Приложи датотеку",
|
||||
"Attention to detail": "Пажња на детаље",
|
||||
"Audio": "Звук",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Највише жетона",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Највише 3 модела могу бити преузета истовремено. Покушајте поново касније.",
|
||||
"May": "Мај",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "Памћење",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "Поруке које пошаљете након стварања ваше везе неће бити подељене. Корисници са URL-ом ће моћи да виде дељено ћаскање.",
|
||||
"Minimum Score": "Најмањи резултат",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "променљива",
|
||||
"variable to have them replaced with clipboard content.": "променљива за замену са садржајем оставе.",
|
||||
"Version": "Издање",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Упозорење: ако ажурирате или промените ваш модел уградње, мораћете поново да увезете све документе.",
|
||||
"Web": "Веб",
|
||||
"Web Loader Settings": "Подешавања веб учитавача",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Lägg till en kort beskrivning av vad den här modelfilen gör",
|
||||
"Add a short title for this prompt": "Lägg till en kort titel för denna prompt",
|
||||
"Add a tag": "Lägg till en tagg",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Lägg till en anpassad prompt",
|
||||
"Add Docs": "Lägg till dokument",
|
||||
"Add Files": "Lägg till filer",
|
||||
"Add Memory": "",
|
||||
@ -51,9 +51,8 @@
|
||||
"Archived Chats": "",
|
||||
"are allowed - Activate this command by typing": "är tillåtna - Aktivera detta kommando genom att skriva",
|
||||
"Are you sure?": "Är du säker?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attention to detail": "",
|
||||
"Attach file": "Bifoga fil",
|
||||
"Attention to detail": "Detaljerad uppmärksamhet",
|
||||
"Audio": "Ljud",
|
||||
"August": "",
|
||||
"Auto-playback response": "Automatisk uppspelning",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "Importera modelfiler",
|
||||
"Import Prompts": "Importera prompts",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "Inkludera `--api`-flagga när du kör stabil-diffusion-webui",
|
||||
"Input commands": "",
|
||||
"Input commands": "Indatakommandon",
|
||||
"Interface": "Gränssnitt",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Max antal tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Högst 3 modeller kan laddas ner samtidigt. Vänligen försök igen senare.",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "Välj ett läge",
|
||||
"Select a model": "Välj en modell",
|
||||
"Select an Ollama instance": "Välj en Ollama-instans",
|
||||
"Select model": "",
|
||||
"Select model": "Välj en modell",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "Skicka ett meddelande",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "variabel",
|
||||
"variable to have them replaced with clipboard content.": "variabel för att få dem ersatta med urklippsinnehåll.",
|
||||
"Version": "Version",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "Webb",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "Arşivlenmiş Sohbetler",
|
||||
"are allowed - Activate this command by typing": "izin verilir - Bu komutu yazarak etkinleştirin",
|
||||
"Are you sure?": "Emin misiniz?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "Dosya ekle",
|
||||
"Attention to detail": "Ayrıntılara dikkat",
|
||||
"Audio": "Ses",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Maksimum Token",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Aynı anda en fazla 3 model indirilebilir. Lütfen daha sonra tekrar deneyin.",
|
||||
"May": "Mayıs",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "Minimum Skor",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "değişken",
|
||||
"variable to have them replaced with clipboard content.": "panodaki içerikle değiştirilmesi için değişken.",
|
||||
"Version": "Sürüm",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Uyarı: Gömme modelinizi günceller veya değiştirirseniz, tüm belgeleri yeniden içe aktarmanız gerekecektir.",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "Web Yükleyici Ayarları",
|
||||
|
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "Архівовані чати",
|
||||
"are allowed - Activate this command by typing": "дозволено - активізуйте цю команду набором",
|
||||
"Are you sure?": "Ви впевнені?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "Прикріпити файл",
|
||||
"Attention to detail": "Увага до деталей",
|
||||
"Audio": "Аудіо",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Максимальна кількість токенів",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Максимум 3 моделі можна завантажити одночасно. Будь ласка, спробуйте пізніше.",
|
||||
"May": "Травень",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "Мінімальний бал",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "змінна",
|
||||
"variable to have them replaced with clipboard content.": "змінна, щоб замінити їх вмістом буфера обміну.",
|
||||
"Version": "Версія",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Попередження: Якщо ви оновлюєте або змінюєте модель вбудовування, вам потрібно буде повторно імпортувати всі документи.",
|
||||
"Web": "Веб",
|
||||
"Web Loader Settings": "Налаштування веб-завантажувача",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "Thêm mô tả ngắn về việc tệp mô tả mô hình (modelfile) này làm gì",
|
||||
"Add a short title for this prompt": "Thêm tiêu đề ngắn cho prompt này",
|
||||
"Add a tag": "Thêm thẻ (tag)",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "Thêm prompt tùy chỉnh",
|
||||
"Add Docs": "Thêm tài liệu",
|
||||
"Add Files": "Thêm tệp",
|
||||
"Add Memory": "",
|
||||
@ -51,7 +51,6 @@
|
||||
"Archived Chats": "bản ghi trò chuyện",
|
||||
"are allowed - Activate this command by typing": "được phép - Kích hoạt lệnh này bằng cách gõ",
|
||||
"Are you sure?": "Bạn có chắc chắn không?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "Đính kèm file",
|
||||
"Attention to detail": "Có sự chú ý đến chi tiết của vấn đề",
|
||||
"Audio": "Âm thanh",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "Max Tokens",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "Tối đa 3 mô hình có thể được tải xuống cùng lúc. Vui lòng thử lại sau.",
|
||||
"May": "Tháng 5",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "Score tối thiểu",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "biến",
|
||||
"variable to have them replaced with clipboard content.": "biến để có chúng được thay thế bằng nội dung clipboard.",
|
||||
"Version": "Version",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "Cảnh báo: Nếu cập nhật hoặc thay đổi embedding model, bạn sẽ cần cập nhật lại tất cả tài liệu.",
|
||||
"Web": "Web",
|
||||
"Web Loader Settings": "Cài đặt Web Loader",
|
||||
|
@ -51,8 +51,7 @@
|
||||
"Archived Chats": "聊天记录存档",
|
||||
"are allowed - Activate this command by typing": "允许 - 通过输入来激活这个命令",
|
||||
"Are you sure?": "你确定吗?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "附件",
|
||||
"Attach file": "添加文件",
|
||||
"Attention to detail": "注重细节",
|
||||
"Audio": "音频",
|
||||
"August": "八月",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "最大令牌数",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "最多可以同时下载 3 个模型,请稍后重试。",
|
||||
"May": "五月",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "最低分",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "变量",
|
||||
"variable to have them replaced with clipboard content.": "变量将被剪贴板内容替换。",
|
||||
"Version": "版本",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "警告: 如果更新或更改 embedding 模型,则需要重新导入所有文档。",
|
||||
"Web": "网页",
|
||||
"Web Loader Settings": "Web 加载器设置",
|
||||
|
@ -17,7 +17,7 @@
|
||||
"Add a short description about what this modelfile does": "為這個 Modelfile 添加一段簡短的描述",
|
||||
"Add a short title for this prompt": "為這個提示詞添加一個簡短的標題",
|
||||
"Add a tag": "新增標籤",
|
||||
"Add custom prompt": "",
|
||||
"Add custom prompt": "新增自定義提示詞",
|
||||
"Add Docs": "新增文件",
|
||||
"Add Files": "新增檔案",
|
||||
"Add Memory": "",
|
||||
@ -51,8 +51,7 @@
|
||||
"Archived Chats": "聊天記錄存檔",
|
||||
"are allowed - Activate this command by typing": "是允許的 - 透過輸入",
|
||||
"Are you sure?": "你確定嗎?",
|
||||
"As you chat with LLMs, the details and preferences it remembers will be shown here.": "",
|
||||
"Attach file": "",
|
||||
"Attach file": "附加檔案",
|
||||
"Attention to detail": "",
|
||||
"Audio": "音訊",
|
||||
"August": "",
|
||||
@ -237,7 +236,7 @@
|
||||
"Import Modelfiles": "匯入 Modelfiles",
|
||||
"Import Prompts": "匯入提示詞",
|
||||
"Include `--api` flag when running stable-diffusion-webui": "在運行 stable-diffusion-webui 時加上 `--api` 標誌",
|
||||
"Input commands": "",
|
||||
"Input commands": "輸入命令",
|
||||
"Interface": "介面",
|
||||
"Invalid Tag": "",
|
||||
"Is Model Vision Capable": "",
|
||||
@ -266,6 +265,7 @@
|
||||
"Max Tokens": "最大 Token 數",
|
||||
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "最多可以同時下載 3 個模型。請稍後再試。",
|
||||
"May": "",
|
||||
"Memories accessible by LLMs will be shown here.": "",
|
||||
"Memory": "",
|
||||
"Messages you send after creating your link won't be shared. Users with the URL will be able to view the shared chat.": "",
|
||||
"Minimum Score": "",
|
||||
@ -396,7 +396,7 @@
|
||||
"Select a mode": "選擇模式",
|
||||
"Select a model": "選擇一個模型",
|
||||
"Select an Ollama instance": "選擇 Ollama 實例",
|
||||
"Select model": "",
|
||||
"Select model": "選擇模型",
|
||||
"Selected models do not support image inputs": "",
|
||||
"Send": "",
|
||||
"Send a Message": "傳送訊息",
|
||||
@ -491,7 +491,6 @@
|
||||
"variable": "變數",
|
||||
"variable to have them replaced with clipboard content.": "變數將替換為剪貼簿內容。",
|
||||
"Version": "版本",
|
||||
"View": "",
|
||||
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "",
|
||||
"Web": "網頁",
|
||||
"Web Loader Settings": "",
|
||||
|
@ -42,6 +42,7 @@
import { LITELLM_API_BASE_URL, OLLAMA_API_BASE_URL, OPENAI_API_BASE_URL } from '$lib/constants';
import { WEBUI_BASE_URL } from '$lib/constants';
import { createOpenAITextStream } from '$lib/apis/streaming';
import { queryMemory } from '$lib/apis/memories';

const i18n = getContext('i18n');

@ -255,6 +256,28 @@
const sendPrompt = async (prompt, parentId, modelId = null) => {
const _chatId = JSON.parse(JSON.stringify($chatId));

let userContext = null;

if ($settings?.memory ?? false) {
const res = await queryMemory(localStorage.token, prompt).catch((error) => {
toast.error(error);
return null;
});

if (res) {
if (res.documents[0].length > 0) {
userContext = res.documents.reduce((acc, doc, index) => {
const createdAtTimestamp = res.metadatas[index][0].created_at;
const createdAtDate = new Date(createdAtTimestamp * 1000).toISOString().split('T')[0];
acc.push(`${index + 1}. [${createdAtDate}]. ${doc[0]}`);
return acc;
}, []);
}

console.log(userContext);
}
}

await Promise.all(
(modelId
? [modelId]
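The hunk above is the new memory lookup: when the memory setting ($settings?.memory) is enabled, the outgoing prompt is passed to queryMemory, and the response, which exposes the parallel documents and metadatas arrays used above, is folded into numbered, dated context lines. Below is a minimal standalone sketch of that formatting step under the same assumed response shape; the helper name and the sample values are illustrative and not part of the commit.

// Hypothetical helper, equivalent to the reduce() in the hunk above.
const formatMemoryContext = (res) => {
	if (!res || res.documents[0].length === 0) {
		return null;
	}
	return res.documents.map((doc, index) => {
		// created_at is a Unix timestamp in seconds; keep only the YYYY-MM-DD part
		const createdAtDate = new Date(res.metadatas[index][0].created_at * 1000)
			.toISOString()
			.split('T')[0];
		return `${index + 1}. [${createdAtDate}]. ${doc[0]}`;
	});
};

// Example (values invented for illustration):
// formatMemoryContext({
//   documents: [['Prefers concise answers']],
//   metadatas: [[{ created_at: 1716076800 }]]
// }) -> ['1. [2024-05-19]. Prefers concise answers']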
@ -276,6 +299,38 @@
modelName: model.custom_info?.name ?? model.name ?? model.id
})
);
// Create response message
let responseMessageId = uuidv4();
let responseMessage = {
parentId: parentId,
id: responseMessageId,
childrenIds: [],
role: 'assistant',
content: '',
model: model.id,
userContext: userContext,
timestamp: Math.floor(Date.now() / 1000) // Unix epoch
};

// Add message to history and Set currentId to messageId
history.messages[responseMessageId] = responseMessage;
history.currentId = responseMessageId;

// Append messageId to childrenIds of parent message
if (parentId !== null) {
history.messages[parentId].childrenIds = [
...history.messages[parentId].childrenIds,
responseMessageId
];
}

if (model?.external) {
await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
} else if (model) {
await sendPromptOllama(model, prompt, responseMessageId, _chatId);
}
} else {
toast.error($i18n.t(`Model {{modelId}} not found`, { modelId }));
}

// Create response message
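The hunk above moves creation of the assistant placeholder message into the page component and links it into the chat history. For orientation, a hypothetical example of the resulting structure, with ids and values invented for illustration: history is a flat map of messages keyed by id, branching is expressed through parentId and childrenIds, and currentId points at the newest leaf.

// Hypothetical shape of the history object after the hunk above has run once.
const exampleHistory = {
	currentId: 'a2',
	messages: {
		u1: { id: 'u1', parentId: null, childrenIds: ['a2'], role: 'user', content: 'hello' },
		a2: {
			id: 'a2',
			parentId: 'u1',
			childrenIds: [],
			role: 'assistant',
			content: '',
			model: 'llama3',
			userContext: null,
			timestamp: 1716076800 // Unix epoch, seconds
		}
	}
};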
@ -329,10 +384,13 @@
scrollToBottom();

const messagesBody = [
$settings.system
$settings.system || (responseMessage?.userContext ?? null)
? {
role: 'system',
content: $settings.system
content:
$settings.system + (responseMessage?.userContext ?? null)
? `\n\nUser Context:\n${responseMessage.userContext.join('\n')}`
: ''
}
: undefined,
...messages
@ -585,10 +643,13 @@
model: model.id,
stream: true,
messages: [
$settings.system
$settings.system || (responseMessage?.userContext ?? null)
? {
role: 'system',
content: $settings.system
content:
$settings.system + (responseMessage?.userContext ?? null)
? `\n\nUser Context:\n${responseMessage.userContext.join('\n')}`
: ''
}
: undefined,
...messages
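In the two hunks above, and again in the matching hunks for the second chat page further down, the system message is now emitted when either a stored system prompt or a memory-derived userContext is present. As the content expression reads here, the + operator binds before the ?: conditional, so the branch tests the truthiness of the concatenated string, the system prompt text never reaches content, and a set system prompt with no memories would call join on a null userContext. That may be an artifact of how this diff was captured rather than of the committed source, so the following is only a hedged sketch of a more explicit composition, assuming the component's $settings and responseMessage variables; the helper name is illustrative and not part of the commit.

// Hypothetical, more explicit equivalent of what the hunks appear to intend:
// keep the stored system prompt and append an optional "User Context" block.
const buildSystemContent = (system, userContext) => {
	let content = system ?? '';
	if (userContext && userContext.length > 0) {
		content += `\n\nUser Context:\n${userContext.join('\n')}`;
	}
	return content;
};

const systemMessage =
	$settings.system || responseMessage?.userContext
		? {
				role: 'system',
				content: buildSystemContent($settings.system, responseMessage?.userContext)
			}
		: undefined;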
@ -44,6 +44,7 @@
WEBUI_BASE_URL
} from '$lib/constants';
import { createOpenAITextStream } from '$lib/apis/streaming';
import { queryMemory } from '$lib/apis/memories';

const i18n = getContext('i18n');

@ -261,6 +262,28 @@
const sendPrompt = async (prompt, parentId, modelId = null) => {
const _chatId = JSON.parse(JSON.stringify($chatId));

let userContext = null;

if ($settings?.memory ?? false) {
const res = await queryMemory(localStorage.token, prompt).catch((error) => {
toast.error(error);
return null;
});

if (res) {
if (res.documents[0].length > 0) {
userContext = res.documents.reduce((acc, doc, index) => {
const createdAtTimestamp = res.metadatas[index][0].created_at;
const createdAtDate = new Date(createdAtTimestamp * 1000).toISOString().split('T')[0];
acc.push(`${index + 1}. [${createdAtDate}]. ${doc[0]}`);
return acc;
}, []);
}

console.log(userContext);
}
}

await Promise.all(
(modelId
? [modelId]
@ -282,6 +305,38 @@
modelName: model.custom_info?.name ?? model.name ?? model.id
})
);
// Create response message
let responseMessageId = uuidv4();
let responseMessage = {
parentId: parentId,
id: responseMessageId,
childrenIds: [],
role: 'assistant',
content: '',
model: model.id,
userContext: userContext,
timestamp: Math.floor(Date.now() / 1000) // Unix epoch
};

// Add message to history and Set currentId to messageId
history.messages[responseMessageId] = responseMessage;
history.currentId = responseMessageId;

// Append messageId to childrenIds of parent message
if (parentId !== null) {
history.messages[parentId].childrenIds = [
...history.messages[parentId].childrenIds,
responseMessageId
];
}

if (model?.external) {
await sendPromptOpenAI(model, prompt, responseMessageId, _chatId);
} else if (model) {
await sendPromptOllama(model, prompt, responseMessageId, _chatId);
}
} else {
toast.error($i18n.t(`Model {{modelId}} not found`, { modelId }));
}

// Create response message
@ -334,10 +389,13 @@
scrollToBottom();

const messagesBody = [
$settings.system
$settings.system || (responseMessage?.userContext ?? null)
? {
role: 'system',
content: $settings.system
content:
$settings.system + (responseMessage?.userContext ?? null)
? `\n\nUser Context:\n${responseMessage.userContext.join('\n')}`
: ''
}
: undefined,
...messages
@ -590,10 +648,13 @@
model: model.id,
stream: true,
messages: [
$settings.system
$settings.system || (responseMessage?.userContext ?? null)
? {
role: 'system',
content: $settings.system
content:
$settings.system + (responseMessage?.userContext ?? null)
? `\n\nUser Context:\n${responseMessage.userContext.join('\n')}`
: ''
}
: undefined,
...messages
@ -722,6 +783,7 @@
} catch (error) {
await handleOpenAIError(error, null, model, responseMessage);
}
messages = messages;

stopResponseFlag = false;
await tick();
