Merge branch 'upstream-dev' into dev

Jannik Streidl 2024-10-14 09:50:40 +02:00
commit f0f4de59eb
39 changed files with 1235 additions and 469 deletions

View File

@ -1,6 +1,6 @@
# syntax=docker/dockerfile:1
# Initialize device type args
# use build args in the docker build commmand with --build-arg="BUILDARG=true"
# use build args in the docker build command with --build-arg="BUILDARG=true"
ARG USE_CUDA=false
ARG USE_OLLAMA=false
# Tested with cu117 for CUDA 11 and cu121 for CUDA 12 (default)
@ -11,6 +11,10 @@ ARG USE_CUDA_VER=cu121
# IMPORTANT: If you change the embedding model (sentence-transformers/all-MiniLM-L6-v2) and vice versa, you aren't able to use RAG Chat with your previous documents loaded in the WebUI! You need to re-embed them.
ARG USE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2
ARG USE_RERANKING_MODEL=""
# Tiktoken encoding name; models to use can be found at https://huggingface.co/models?library=tiktoken
ARG USE_TIKTOKEN_ENCODING_NAME="cl100k_base"
ARG BUILD_HASH=dev-build
# Override at your own risk - non-root configurations are untested
ARG UID=0
@ -72,6 +76,10 @@ ENV RAG_EMBEDDING_MODEL="$USE_EMBEDDING_MODEL_DOCKER" \
RAG_RERANKING_MODEL="$USE_RERANKING_MODEL_DOCKER" \
SENTENCE_TRANSFORMERS_HOME="/app/backend/data/cache/embedding/models"
## Tiktoken model settings ##
ENV TIKTOKEN_ENCODING_NAME="$USE_TIKTOKEN_ENCODING_NAME" \
TIKTOKEN_CACHE_DIR="/app/backend/data/cache/tiktoken"
## Hugging Face download cache ##
ENV HF_HOME="/app/backend/data/cache/embedding/models"
@ -131,11 +139,13 @@ RUN pip3 install uv && \
uv pip install --system -r requirements.txt --no-cache-dir && \
python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
else \
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
uv pip install --system -r requirements.txt --no-cache-dir && \
python -c "import os; from sentence_transformers import SentenceTransformer; SentenceTransformer(os.environ['RAG_EMBEDDING_MODEL'], device='cpu')" && \
python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"; \
python -c "import os; import tiktoken; tiktoken.get_encoding(os.environ['TIKTOKEN_ENCODING_NAME'])"; \
fi; \
chown -R $UID:$GID /app/backend/data/

View File

@ -18,7 +18,7 @@ If you're experiencing connection issues, its often due to the WebUI docker c
docker run -d --network=host -v open-webui:/app/backend/data -e OLLAMA_BASE_URL=http://127.0.0.1:11434 --name open-webui --restart always ghcr.io/open-webui/open-webui:main
```
### Error on Slow Reponses for Ollama
### Error on Slow Responses for Ollama
Open WebUI has a default timeout of 5 minutes for Ollama to finish generating the response. If needed, this can be adjusted via the environment variable AIOHTTP_CLIENT_TIMEOUT, which sets the timeout in seconds.
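For reference, a minimal sketch (not part of this diff) of how that value is consumed on the backend, based on the `aiohttp.ClientTimeout` usage shown in the changes below; the URL handling and the 300-second fallback are illustrative assumptions:

```
import os
import aiohttp

# AIOHTTP_CLIENT_TIMEOUT is read from the environment (in seconds); an empty value
# is treated as "no limit", mirroring the env parsing added in env.py further down.
_raw = os.environ.get("AIOHTTP_CLIENT_TIMEOUT", "300")
timeout = aiohttp.ClientTimeout(total=int(_raw) if _raw else None)

async def fetch_json(url: str) -> dict:
    # The session-level timeout bounds the whole request, including generation time.
    async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:
        async with session.get(url) as response:
            return await response.json()
```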

View File

@ -18,7 +18,10 @@ from open_webui.config import (
OPENAI_API_KEYS,
AppConfig,
)
from open_webui.env import AIOHTTP_CLIENT_TIMEOUT
from open_webui.env import (
AIOHTTP_CLIENT_TIMEOUT,
AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST,
)
from open_webui.constants import ERROR_MESSAGES
from open_webui.env import SRC_LOG_LEVELS
@ -179,7 +182,7 @@ async def speech(request: Request, user=Depends(get_verified_user)):
async def fetch_url(url, key):
timeout = aiohttp.ClientTimeout(total=3)
timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST)
try:
headers = {"Authorization": f"Bearer {key}"}
async with aiohttp.ClientSession(timeout=timeout, trust_env=True) as session:

View File

@ -15,6 +15,9 @@ from fastapi import Depends, FastAPI, File, Form, HTTPException, UploadFile, sta
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
from open_webui.apps.webui.models.knowledge import Knowledges
from open_webui.apps.retrieval.vector.connector import VECTOR_DB_CLIENT
# Document loaders
@ -47,6 +50,8 @@ from open_webui.apps.retrieval.utils import (
from open_webui.apps.webui.models.files import Files
from open_webui.config import (
BRAVE_SEARCH_API_KEY,
TIKTOKEN_ENCODING_NAME,
RAG_TEXT_SPLITTER,
CHUNK_OVERLAP,
CHUNK_SIZE,
CONTENT_EXTRACTION_ENGINE,
@ -102,7 +107,7 @@ from open_webui.utils.misc import (
)
from open_webui.utils.utils import get_admin_user, get_verified_user
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter, TokenTextSplitter
from langchain_community.document_loaders import (
YoutubeLoader,
)
@ -129,6 +134,9 @@ app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION = (
app.state.config.CONTENT_EXTRACTION_ENGINE = CONTENT_EXTRACTION_ENGINE
app.state.config.TIKA_SERVER_URL = TIKA_SERVER_URL
app.state.config.TEXT_SPLITTER = RAG_TEXT_SPLITTER
app.state.config.TIKTOKEN_ENCODING_NAME = TIKTOKEN_ENCODING_NAME
app.state.config.CHUNK_SIZE = CHUNK_SIZE
app.state.config.CHUNK_OVERLAP = CHUNK_OVERLAP
@ -171,9 +179,9 @@ def update_embedding_model(
auto_update: bool = False,
):
if embedding_model and app.state.config.RAG_EMBEDDING_ENGINE == "":
import sentence_transformers
from sentence_transformers import SentenceTransformer
app.state.sentence_transformer_ef = sentence_transformers.SentenceTransformer(
app.state.sentence_transformer_ef = SentenceTransformer(
get_model_path(embedding_model, auto_update),
device=DEVICE_TYPE,
trust_remote_code=RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE,
@ -384,18 +392,19 @@ async def get_rag_config(user=Depends(get_admin_user)):
return {
"status": True,
"pdf_extract_images": app.state.config.PDF_EXTRACT_IMAGES,
"file": {
"max_size": app.state.config.FILE_MAX_SIZE,
"max_count": app.state.config.FILE_MAX_COUNT,
},
"content_extraction": {
"engine": app.state.config.CONTENT_EXTRACTION_ENGINE,
"tika_server_url": app.state.config.TIKA_SERVER_URL,
},
"chunk": {
"text_splitter": app.state.config.TEXT_SPLITTER,
"chunk_size": app.state.config.CHUNK_SIZE,
"chunk_overlap": app.state.config.CHUNK_OVERLAP,
},
"file": {
"max_size": app.state.config.FILE_MAX_SIZE,
"max_count": app.state.config.FILE_MAX_COUNT,
},
"youtube": {
"language": app.state.config.YOUTUBE_LOADER_LANGUAGE,
"translation": app.state.YOUTUBE_LOADER_TRANSLATION,
@ -434,6 +443,7 @@ class ContentExtractionConfig(BaseModel):
class ChunkParamUpdateForm(BaseModel):
text_splitter: Optional[str] = None
chunk_size: int
chunk_overlap: int
@ -493,6 +503,7 @@ async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_
app.state.config.TIKA_SERVER_URL = form_data.content_extraction.tika_server_url
if form_data.chunk is not None:
app.state.config.TEXT_SPLITTER = form_data.chunk.text_splitter
app.state.config.CHUNK_SIZE = form_data.chunk.chunk_size
app.state.config.CHUNK_OVERLAP = form_data.chunk.chunk_overlap
@ -539,6 +550,7 @@ async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_
"tika_server_url": app.state.config.TIKA_SERVER_URL,
},
"chunk": {
"text_splitter": app.state.config.TEXT_SPLITTER,
"chunk_size": app.state.config.CHUNK_SIZE,
"chunk_overlap": app.state.config.CHUNK_OVERLAP,
},
@ -599,11 +611,10 @@ class QuerySettingsForm(BaseModel):
async def update_query_settings(
form_data: QuerySettingsForm, user=Depends(get_admin_user)
):
app.state.config.RAG_TEMPLATE = (
form_data.template if form_data.template != "" else DEFAULT_RAG_TEMPLATE
)
app.state.config.RAG_TEMPLATE = form_data.template
app.state.config.TOP_K = form_data.k if form_data.k else 4
app.state.config.RELEVANCE_THRESHOLD = form_data.r if form_data.r else 0.0
app.state.config.ENABLE_RAG_HYBRID_SEARCH = (
form_data.hybrid if form_data.hybrid else False
)
@ -648,18 +659,41 @@ def save_docs_to_vector_db(
raise ValueError(ERROR_MESSAGES.DUPLICATE_CONTENT)
if split:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=app.state.config.CHUNK_SIZE,
chunk_overlap=app.state.config.CHUNK_OVERLAP,
add_start_index=True,
)
if app.state.config.TEXT_SPLITTER in ["", "character"]:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=app.state.config.CHUNK_SIZE,
chunk_overlap=app.state.config.CHUNK_OVERLAP,
add_start_index=True,
)
elif app.state.config.TEXT_SPLITTER == "token":
text_splitter = TokenTextSplitter(
encoding_name=app.state.config.TIKTOKEN_ENCODING_NAME,
chunk_size=app.state.config.CHUNK_SIZE,
chunk_overlap=app.state.config.CHUNK_OVERLAP,
add_start_index=True,
)
else:
raise ValueError(ERROR_MESSAGES.DEFAULT("Invalid text splitter"))
docs = text_splitter.split_documents(docs)
if len(docs) == 0:
raise ValueError(ERROR_MESSAGES.EMPTY_CONTENT)
texts = [doc.page_content for doc in docs]
metadatas = [{**doc.metadata, **(metadata if metadata else {})} for doc in docs]
metadatas = [
{
**doc.metadata,
**(metadata if metadata else {}),
"embedding_config": json.dumps(
{
"engine": app.state.config.RAG_EMBEDDING_ENGINE,
"model": app.state.config.RAG_EMBEDDING_MODEL,
}
),
}
for doc in docs
]
# ChromaDB does not like datetime formats
# for meta-data so convert them to string.
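For context, a small standalone sketch (not part of the diff) of the two splitter modes wired up above, assuming `langchain` and `tiktoken` are installed; the sample text and chunk sizes are illustrative, not the application defaults:

```
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter, TokenTextSplitter

docs = [Document(page_content="Open WebUI stores each uploaded file as chunks in the vector database. " * 40)]

# "character" mode (the previous behaviour): chunk_size/chunk_overlap count characters.
char_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50, add_start_index=True)

# "token" mode: the same parameters count tiktoken tokens of the configured encoding.
token_splitter = TokenTextSplitter(
    encoding_name="cl100k_base",  # the TIKTOKEN_ENCODING_NAME default
    chunk_size=500,
    chunk_overlap=50,
    add_start_index=True,
)

print(len(char_splitter.split_documents(docs)), len(token_splitter.split_documents(docs)))
```

Splitting by tokens keeps chunk sizes aligned with the embedding model's token budget rather than raw character counts, which is what the new `token` option exposes.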
@ -1255,6 +1289,7 @@ def delete_entries_from_collection(form_data: DeleteForm, user=Depends(get_admin
@app.post("/reset/db")
def reset_vector_db(user=Depends(get_admin_user)):
VECTOR_DB_CLIENT.reset()
Knowledges.delete_all_knowledge()
@app.post("/reset/uploads")
@ -1277,28 +1312,6 @@ def reset_upload_dir(user=Depends(get_admin_user)) -> bool:
print(f"The directory {folder} does not exist")
except Exception as e:
print(f"Failed to process the directory {folder}. Reason: {e}")
return True
@app.post("/reset")
def reset(user=Depends(get_admin_user)) -> bool:
folder = f"{UPLOAD_DIR}"
for filename in os.listdir(folder):
file_path = os.path.join(folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
elif os.path.isdir(file_path):
shutil.rmtree(file_path)
except Exception as e:
log.error("Failed to delete %s. Reason: %s" % (file_path, e))
try:
VECTOR_DB_CLIENT.reset()
except Exception as e:
log.exception(e)
return True

View File

@ -19,6 +19,7 @@ from open_webui.apps.retrieval.vector.connector import VECTOR_DB_CLIENT
from open_webui.utils.misc import get_last_user_message
from open_webui.env import SRC_LOG_LEVELS
from open_webui.config import DEFAULT_RAG_TEMPLATE
log = logging.getLogger(__name__)
@ -239,8 +240,13 @@ def query_collection_with_hybrid_search(
def rag_template(template: str, context: str, query: str):
count = template.count("[context]")
assert "[context]" in template, "RAG template does not contain '[context]'"
if template == "":
template = DEFAULT_RAG_TEMPLATE
if "[context]" not in template and "{{CONTEXT}}" not in template:
log.debug(
"WARNING: The RAG template does not contain the '[context]' or '{{CONTEXT}}' placeholder."
)
if "<context>" in context and "</context>" in context:
log.debug(
@ -249,14 +255,25 @@ def rag_template(template: str, context: str, query: str):
"nothing, or the user might be trying to hack something."
)
query_placeholders = []
if "[query]" in context:
query_placeholder = f"[query-{str(uuid.uuid4())}]"
query_placeholder = "{{QUERY" + str(uuid.uuid4()) + "}}"
template = template.replace("[query]", query_placeholder)
template = template.replace("[context]", context)
query_placeholders.append(query_placeholder)
if "{{QUERY}}" in context:
query_placeholder = "{{QUERY" + str(uuid.uuid4()) + "}}"
template = template.replace("{{QUERY}}", query_placeholder)
query_placeholders.append(query_placeholder)
template = template.replace("[context]", context)
template = template.replace("{{CONTEXT}}", context)
template = template.replace("[query]", query)
template = template.replace("{{QUERY}}", query)
for query_placeholder in query_placeholders:
template = template.replace(query_placeholder, query)
else:
template = template.replace("[context]", context)
template = template.replace("[query]", query)
return template
@ -375,8 +392,21 @@ def get_rag_context(
for context in relevant_contexts:
try:
if "documents" in context:
file_names = list(
set(
[
metadata["name"]
for metadata in context["metadatas"][0]
if metadata is not None and "name" in metadata
]
)
)
contexts.append(
"\n\n".join(
(", ".join(file_names) + ":\n\n")
if file_names
else ""
+ "\n\n".join(
[text for text in context["documents"][0] if text is not None]
)
)
@ -393,6 +423,7 @@ def get_rag_context(
except Exception as e:
log.exception(e)
print(contexts, citations)
return contexts, citations

View File

@ -61,6 +61,9 @@ class ChatModel(BaseModel):
class ChatForm(BaseModel):
chat: dict
class ChatTitleMessagesForm(BaseModel):
title: str
messages: list[dict]
class ChatTitleForm(BaseModel):
title: str

View File

@ -154,5 +154,15 @@ class KnowledgeTable:
except Exception:
return False
def delete_all_knowledge(self) -> bool:
with get_db() as db:
try:
db.query(Knowledge).delete()
db.commit()
return True
except Exception:
return False
Knowledges = KnowledgeTable()

View File

@ -8,7 +8,7 @@ from open_webui.apps.webui.internal.db import Base, get_db
from open_webui.env import SRC_LOG_LEVELS
from pydantic import BaseModel, ConfigDict
from sqlalchemy import BigInteger, Column, String, JSON
from sqlalchemy import BigInteger, Column, String, JSON, PrimaryKeyConstraint
log = logging.getLogger(__name__)
log.setLevel(SRC_LOG_LEVELS["MODELS"])
@ -19,11 +19,14 @@ log.setLevel(SRC_LOG_LEVELS["MODELS"])
####################
class Tag(Base):
__tablename__ = "tag"
id = Column(String, primary_key=True)
id = Column(String)
name = Column(String)
user_id = Column(String)
meta = Column(JSON, nullable=True)
# Unique constraint ensuring (id, user_id) is unique, not just the `id` column
__table_args__ = (PrimaryKeyConstraint("id", "user_id", name="pk_id_user_id"),)
class TagModel(BaseModel):
id: str
@ -57,7 +60,8 @@ class TagTable:
return TagModel.model_validate(result)
else:
return None
except Exception:
except Exception as e:
print(e)
return None
def get_tag_by_name_and_user_id(
@ -78,11 +82,15 @@ class TagTable:
for tag in (db.query(Tag).filter_by(user_id=user_id).all())
]
def get_tags_by_ids(self, ids: list[str]) -> list[TagModel]:
def get_tags_by_ids_and_user_id(
self, ids: list[str], user_id: str
) -> list[TagModel]:
with get_db() as db:
return [
TagModel.model_validate(tag)
for tag in (db.query(Tag).filter(Tag.id.in_(ids)).all())
for tag in (
db.query(Tag).filter(Tag.id.in_(ids), Tag.user_id == user_id).all()
)
]
def delete_tag_by_name_and_user_id(self, name: str, user_id: str) -> bool:

View File

@ -465,7 +465,7 @@ async def get_chat_tags_by_id(id: str, user=Depends(get_verified_user)):
chat = Chats.get_chat_by_id_and_user_id(id, user.id)
if chat:
tags = chat.meta.get("tags", [])
return Tags.get_tags_by_ids(tags)
return Tags.get_tags_by_ids_and_user_id(tags, user.id)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND
@ -494,7 +494,7 @@ async def add_tag_by_id_and_tag_name(
chat = Chats.get_chat_by_id_and_user_id(id, user.id)
tags = chat.meta.get("tags", [])
return Tags.get_tags_by_ids(tags)
return Tags.get_tags_by_ids_and_user_id(tags, user.id)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.DEFAULT()
@ -519,7 +519,7 @@ async def delete_tag_by_id_and_tag_name(
chat = Chats.get_chat_by_id_and_user_id(id, user.id)
tags = chat.meta.get("tags", [])
return Tags.get_tags_by_ids(tags)
return Tags.get_tags_by_ids_and_user_id(tags, user.id)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND
@ -543,7 +543,7 @@ async def delete_all_chat_tags_by_id(id: str, user=Depends(get_verified_user)):
chat = Chats.get_chat_by_id_and_user_id(id, user.id)
tags = chat.meta.get("tags", [])
return Tags.get_tags_by_ids(tags)
return Tags.get_tags_by_ids_and_user_id(tags, user.id)
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail=ERROR_MESSAGES.NOT_FOUND

View File

@ -1,16 +1,14 @@
import site
from pathlib import Path
import black
import markdown
from open_webui.apps.webui.models.chats import ChatTitleMessagesForm
from open_webui.config import DATA_DIR, ENABLE_ADMIN_EXPORT
from open_webui.env import FONTS_DIR
from open_webui.constants import ERROR_MESSAGES
from fastapi import APIRouter, Depends, HTTPException, Response, status
from fpdf import FPDF
from pydantic import BaseModel
from starlette.responses import FileResponse
from open_webui.utils.misc import get_gravatar_url
from open_webui.utils.pdf_generator import PDFGenerator
from open_webui.utils.utils import get_admin_user
router = APIRouter()
@ -56,58 +54,19 @@ class ChatForm(BaseModel):
@router.post("/pdf")
async def download_chat_as_pdf(
form_data: ChatForm,
form_data: ChatTitleMessagesForm,
):
global FONTS_DIR
try:
pdf_bytes = PDFGenerator(form_data).generate_chat_pdf()
pdf = FPDF()
pdf.add_page()
# When running using `pip install` the static directory is in the site packages.
if not FONTS_DIR.exists():
FONTS_DIR = Path(site.getsitepackages()[0]) / "static/fonts"
# When running using `pip install -e .` the static directory is in the site packages.
# This path only works if `open-webui serve` is run from the root of this project.
if not FONTS_DIR.exists():
FONTS_DIR = Path("./backend/static/fonts")
pdf.add_font("NotoSans", "", f"{FONTS_DIR}/NotoSans-Regular.ttf")
pdf.add_font("NotoSans", "b", f"{FONTS_DIR}/NotoSans-Bold.ttf")
pdf.add_font("NotoSans", "i", f"{FONTS_DIR}/NotoSans-Italic.ttf")
pdf.add_font("NotoSansKR", "", f"{FONTS_DIR}/NotoSansKR-Regular.ttf")
pdf.add_font("NotoSansJP", "", f"{FONTS_DIR}/NotoSansJP-Regular.ttf")
pdf.add_font("NotoSansSC", "", f"{FONTS_DIR}/NotoSansSC-Regular.ttf")
pdf.set_font("NotoSans", size=12)
pdf.set_fallback_fonts(["NotoSansKR", "NotoSansJP", "NotoSansSC"])
pdf.set_auto_page_break(auto=True, margin=15)
# Adjust the effective page width for multi_cell
effective_page_width = (
pdf.w - 2 * pdf.l_margin - 10
) # Subtracted an additional 10 for extra padding
# Add chat messages
for message in form_data.messages:
role = message["role"]
content = message["content"]
pdf.set_font("NotoSans", "B", size=14) # Bold for the role
pdf.multi_cell(effective_page_width, 10, f"{role.upper()}", 0, "L")
pdf.ln(1) # Extra space between messages
pdf.set_font("NotoSans", size=10) # Regular for content
pdf.multi_cell(effective_page_width, 6, content, 0, "L")
pdf.ln(1.5) # Extra space between messages
# Save the pdf with name .pdf
pdf_bytes = pdf.output()
return Response(
content=bytes(pdf_bytes),
media_type="application/pdf",
headers={"Content-Disposition": "attachment;filename=chat.pdf"},
)
return Response(
content=pdf_bytes,
media_type="application/pdf",
headers={"Content-Disposition": "attachment;filename=chat.pdf"},
)
except Exception as e:
print(e)
raise HTTPException(status_code=400, detail=str(e))
@router.get("/db/download")

View File

@ -1014,6 +1014,22 @@ RAG_RERANKING_MODEL_TRUST_REMOTE_CODE = (
os.environ.get("RAG_RERANKING_MODEL_TRUST_REMOTE_CODE", "").lower() == "true"
)
RAG_TEXT_SPLITTER = PersistentConfig(
"RAG_TEXT_SPLITTER",
"rag.text_splitter",
os.environ.get("RAG_TEXT_SPLITTER", ""),
)
TIKTOKEN_CACHE_DIR = os.environ.get("TIKTOKEN_CACHE_DIR", f"{CACHE_DIR}/tiktoken")
TIKTOKEN_ENCODING_NAME = PersistentConfig(
"TIKTOKEN_ENCODING_NAME",
"rag.tiktoken_encoding_name",
os.environ.get("TIKTOKEN_ENCODING_NAME", "cl100k_base"),
)
CHUNK_SIZE = PersistentConfig(
"CHUNK_SIZE", "rag.chunk_size", int(os.environ.get("CHUNK_SIZE", "1000"))
)

View File

@ -20,7 +20,7 @@ class ERROR_MESSAGES(str, Enum):
def __str__(self) -> str:
return super().__str__()
DEFAULT = lambda err="": f"Something went wrong :/\n{err if err else ''}"
DEFAULT = lambda err="": f"Something went wrong :/\n[ERROR: {err if err else ''}]"
ENV_VAR_NOT_FOUND = "Required environment variable not found. Terminating now."
CREATE_USER_ERROR = "Oops! Something went wrong while creating your account. Please try again later. If the issue persists, contact support for assistance."
DELETE_USER_ERROR = "Oops! Something went wrong. We encountered an issue while trying to delete the user. Please give it another shot."

View File

@ -230,6 +230,8 @@ if FROM_INIT_PY:
DATA_DIR = Path(os.getenv("DATA_DIR", OPEN_WEBUI_DIR / "data"))
STATIC_DIR = Path(os.getenv("STATIC_DIR", OPEN_WEBUI_DIR / "static"))
FONTS_DIR = Path(os.getenv("FONTS_DIR", OPEN_WEBUI_DIR / "static" / "fonts"))
FRONTEND_BUILD_DIR = Path(os.getenv("FRONTEND_BUILD_DIR", BASE_DIR / "build")).resolve()
@ -361,6 +363,20 @@ else:
except Exception:
AIOHTTP_CLIENT_TIMEOUT = 300
AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST = os.environ.get(
"AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST", "3"
)
if AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST == "":
AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST = None
else:
try:
AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST = int(
AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST
)
except Exception:
AIOHTTP_CLIENT_TIMEOUT_OPENAI_MODEL_LIST = 3
####################################
# OFFLINE_MODE
####################################

View File

@ -578,7 +578,7 @@ class ChatCompletionMiddleware(BaseHTTPMiddleware):
}
# Initialize data_items to store additional data to be sent to the client
# Initalize contexts and citation
# Initialize contexts and citation
data_items = []
contexts = []
citations = []
@ -990,11 +990,13 @@ async def get_all_models():
owned_by = model["owned_by"]
if "pipe" in model:
pipe = model["pipe"]
if "info" in model and "meta" in model["info"]:
action_ids.extend(model["info"]["meta"].get("actionIds", []))
break
if custom_model.meta:
meta = custom_model.meta.model_dump()
if "actionIds" in meta:
action_ids.extend(meta["actionIds"])
models.append(
{
"id": custom_model.id,
@ -2277,7 +2279,7 @@ async def oauth_login(provider: str, request: Request):
# 2. If OAUTH_MERGE_ACCOUNTS_BY_EMAIL is true, find a user with the email address provided via OAuth
# - This is considered insecure in general, as OAuth providers do not always verify email addresses
# 3. If there is no user, and ENABLE_OAUTH_SIGNUP is true, create a user
# - Email addresses are considered unique, so we fail registration if the email address is alreayd taken
# - Email addresses are considered unique, so we fail registration if the email address is already taken
@app.get("/oauth/{provider}/callback")
async def oauth_callback(provider: str, request: Request, response: Response):
if provider not in OAUTH_PROVIDERS:
@ -2385,7 +2387,7 @@ async def oauth_callback(provider: str, request: Request, response: Response):
key="token",
value=jwt_token,
httponly=True, # Ensures the cookie is not accessible via JavaScript
samesite=WEBUI_SESSION_COOKIE_SAME_SITE,
samesite=WEBUI_SESSION_COOKIE_SAME_SITE,
secure=WEBUI_SESSION_COOKIE_SECURE,
)

View File

@ -0,0 +1,67 @@
"""Update tags
Revision ID: 3ab32c4b8f59
Revises: 1af9b942657b
Create Date: 2024-10-09 21:02:35.241684
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, select, update, column
from sqlalchemy.engine.reflection import Inspector
import json
revision = "3ab32c4b8f59"
down_revision = "1af9b942657b"
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
# Inspecting the 'tag' table constraints and structure
existing_pk = inspector.get_pk_constraint("tag")
unique_constraints = inspector.get_unique_constraints("tag")
existing_indexes = inspector.get_indexes("tag")
print(existing_pk, unique_constraints)
with op.batch_alter_table("tag", schema=None) as batch_op:
# Drop unique constraints that could conflict with new primary key
for constraint in unique_constraints:
if constraint["name"] == "uq_id_user_id":
batch_op.drop_constraint(constraint["name"], type_="unique")
for index in existing_indexes:
if index["unique"]:
# Drop the unique index
batch_op.drop_index(index["name"])
# Drop existing primary key constraint if it exists
if existing_pk and existing_pk.get("constrained_columns"):
batch_op.drop_constraint(existing_pk["name"], type_="primary")
# Immediately after dropping the old primary key, create the new one
batch_op.create_primary_key("pk_id_user_id", ["id", "user_id"])
def downgrade():
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
current_pk = inspector.get_pk_constraint("tag")
with op.batch_alter_table("tag", schema=None) as batch_op:
# Drop the current primary key first, if it matches the one we know we added in upgrade
if current_pk and "pk_id_user_id" == current_pk.get("name"):
batch_op.drop_constraint("pk_id_user_id", type_="primary")
# Restore the original primary key
batch_op.create_primary_key("pk_id", ["id"])
# Since primary key on just 'id' is restored, we now add back any unique constraints if necessary
batch_op.create_unique_constraint("uq_id_user_id", ["id", "user_id"])

View File

@ -0,0 +1,319 @@
/* HTML and Body */
@font-face {
font-family: 'NotoSans';
src: url('fonts/NotoSans-Variable.ttf');
}
@font-face {
font-family: 'NotoSansJP';
src: url('fonts/NotoSansJP-Variable.ttf');
}
@font-face {
font-family: 'NotoSansKR';
src: url('fonts/NotoSansKR-Variable.ttf');
}
@font-face {
font-family: 'NotoSansSC';
src: url('fonts/NotoSansSC-Variable.ttf');
}
@font-face {
font-family: 'NotoSansSC-Regular';
src: url('fonts/NotoSansSC-Regular.ttf');
}
html {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'NotoSans', 'NotoSansJP', 'NotoSansKR',
'NotoSansSC', 'STSong-Light', 'MSung-Light', 'HeiseiMin-W3', 'HYSMyeongJo-Medium', Roboto,
'Helvetica Neue', Arial, sans-serif;
font-size: 14px; /* Default font size */
line-height: 1.5;
}
*,
*::before,
*::after {
box-sizing: inherit;
}
body {
margin: 0;
color: #212529;
background-color: #fff;
width: auto;
}
/* Typography */
h1,
h2,
h3,
h4,
h5,
h6 {
font-weight: 500;
margin: 0;
}
h1 {
font-size: 2.5rem;
}
h2 {
font-size: 2rem;
}
h3 {
font-size: 1.75rem;
}
h4 {
font-size: 1.5rem;
}
h5 {
font-size: 1.25rem;
}
h6 {
font-size: 1rem;
}
p {
margin-top: 0;
margin-bottom: 1rem;
}
/* Grid System */
.container {
width: 100%;
padding-right: 15px;
padding-left: 15px;
margin-right: auto;
margin-left: auto;
}
/* Utilities */
.text-center {
text-align: center;
}
/* Additional Text Utilities */
.text-muted {
color: #6c757d; /* Muted text color */
}
/* Small Text */
small {
font-size: 80%; /* Smaller font size relative to the base */
color: #6c757d; /* Lighter text color for secondary information */
margin-bottom: 0;
margin-top: 0;
}
/* Strong Element Styles */
strong {
font-weight: bolder; /* Ensures the text is bold */
color: inherit; /* Inherits the color from its parent element */
}
/* link */
a {
color: #007bff;
text-decoration: none;
background-color: transparent;
}
a:hover {
color: #0056b3;
text-decoration: underline;
}
/* General styles for lists */
ol,
ul,
li {
padding-left: 40px; /* Increase padding to move bullet points to the right */
margin-left: 20px; /* Indent lists from the left */
}
/* Ordered list styles */
ol {
list-style-type: decimal; /* Use numbers for ordered lists */
margin-bottom: 10px; /* Space after each list */
}
ol li {
margin-bottom: 0.5rem; /* Space between ordered list items */
}
/* Unordered list styles */
ul {
list-style-type: disc; /* Use bullets for unordered lists */
margin-bottom: 10px; /* Space after each list */
}
ul li {
margin-bottom: 0.5rem; /* Space between unordered list items */
}
/* List item styles */
li {
margin-bottom: 5px; /* Space between list items */
line-height: 1.5; /* Line height for better readability */
}
/* Nested lists */
ol ol,
ol ul,
ul ol,
ul ul {
padding-left: 20px;
margin-left: 30px; /* Further indent nested lists */
margin-bottom: 0; /* Remove extra margin at the bottom of nested lists */
}
/* Code blocks */
pre {
background-color: #f4f4f4;
padding: 10px;
overflow-x: auto;
max-width: 100%; /* Ensure it doesn't overflow the page */
width: 80%; /* Set a specific width for a container-like appearance */
margin: 0 1em; /* Center the pre block */
box-sizing: border-box; /* Include padding in the width */
border: 1px solid #ccc; /* Optional: Add a border for better definition */
border-radius: 4px; /* Optional: Add rounded corners */
}
code {
font-family: 'Courier New', Courier, monospace;
background-color: #f4f4f4;
padding: 2px 4px;
border-radius: 4px;
box-sizing: border-box; /* Include padding in the width */
}
.message {
margin-top: 8px;
margin-bottom: 8px;
max-width: 100%;
overflow-wrap: break-word;
}
/* Table Styles */
table {
width: 100%;
margin-bottom: 1rem;
color: #212529;
border-collapse: collapse; /* Removes the space between borders */
}
th,
td {
margin: 0;
padding: 0.75rem;
vertical-align: top;
border-top: 1px solid #dee2e6;
}
thead th {
vertical-align: bottom;
border-bottom: 2px solid #dee2e6;
}
tbody + tbody {
border-top: 2px solid #dee2e6;
}
/* markdown-section styles */
.markdown-section blockquote,
.markdown-section h1,
.markdown-section h2,
.markdown-section h3,
.markdown-section h4,
.markdown-section h5,
.markdown-section h6,
.markdown-section p,
.markdown-section pre,
.markdown-section table,
.markdown-section ul {
/* Give most block elements margin top and bottom */
margin-top: 1rem;
}
/* Remove top margin if it's the first child */
.markdown-section blockquote:first-child,
.markdown-section h1:first-child,
.markdown-section h2:first-child,
.markdown-section h3:first-child,
.markdown-section h4:first-child,
.markdown-section h5:first-child,
.markdown-section h6:first-child,
.markdown-section p:first-child,
.markdown-section pre:first-child,
.markdown-section table:first-child,
.markdown-section ul:first-child {
margin-top: 0;
}
/* Remove top margin of <ul> following a <p> */
.markdown-section p + ul {
margin-top: 0;
}
/* Remove bottom margin of <p> if it is followed by a <ul> */
/* Note: :has is not supported in CSS, so you would need JavaScript for this behavior */
.markdown-section p {
margin-bottom: 0;
}
/* Add a rule to reset margin-bottom for <p> not followed by <ul> */
.markdown-section p + ul {
margin-top: 0;
}
/* List item styles */
.markdown-section li {
padding: 2px;
}
.markdown-section li p {
margin-bottom: 0;
padding: 0;
}
/* Avoid margins for nested lists */
.markdown-section li > ul {
margin-top: 0;
margin-bottom: 0;
}
/* Table styles */
.markdown-section table {
width: 100%;
border-collapse: collapse;
margin: 1rem 0;
}
.markdown-section th,
.markdown-section td {
border: 1px solid #ddd;
padding: 0.5rem;
text-align: left;
}
.markdown-section th {
background-color: #f2f2f2;
}
.markdown-section pre {
padding: 10px;
margin: 10px;
}
.markdown-section pre code {
position: relative;
color: rgb(172, 0, 95);
}

Binary file not shown.

View File

@ -0,0 +1,139 @@
from datetime import datetime
from io import BytesIO
from pathlib import Path
from typing import Dict, Any, List
from markdown import markdown
import site
from fpdf import FPDF
from open_webui.env import STATIC_DIR, FONTS_DIR
from open_webui.apps.webui.models.chats import ChatTitleMessagesForm
class PDFGenerator:
"""
Description:
The `PDFGenerator` class is designed to create PDF documents from chat messages.
The process involves transforming markdown content into HTML and then into a PDF format
Attributes:
- `form_data`: An instance of `ChatTitleMessagesForm` containing title and messages.
"""
def __init__(self, form_data: ChatTitleMessagesForm):
self.html_body = None
self.messages_html = None
self.form_data = form_data
self.css = Path(STATIC_DIR / "assets" / "pdf-style.css").read_text()
def format_timestamp(self, timestamp: float) -> str:
"""Convert a UNIX timestamp to a formatted date string."""
try:
date_time = datetime.fromtimestamp(timestamp)
return date_time.strftime("%Y-%m-%d, %H:%M:%S")
except (ValueError, TypeError) as e:
# Log the error if necessary
return ""
def _build_html_message(self, message: Dict[str, Any]) -> str:
"""Build HTML for a single message."""
role = message.get("role", "user")
content = message.get("content", "")
timestamp = message.get("timestamp")
model = message.get("model") if role == "assistant" else ""
date_str = self.format_timestamp(timestamp) if timestamp else ""
# extends pymdownx extension to convert markdown to html.
# - https://facelessuser.github.io/pymdown-extensions/usage_notes/
html_content = markdown(content, extensions=["pymdownx.extra"])
html_message = f"""
<div class="message">
<small> {date_str} </small>
<div>
<h2>
<strong>{role.title()}</strong>
<small class="text-muted">{model}</small>
</h2>
</div>
<div class="markdown-section">
{html_content}
</div>
</div>
"""
return html_message
def _generate_html_body(self) -> str:
"""Generate the full HTML body for the PDF."""
return f"""
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
</head>
<body>
<div class="container">
<div class="text-center">
<h1>{self.form_data.title}</h1>
</div>
<div>
{self.messages_html}
</div>
</div>
</body>
</html>
"""
def generate_chat_pdf(self) -> bytes:
"""
Generate a PDF from chat messages.
"""
try:
global FONTS_DIR
pdf = FPDF()
pdf.add_page()
# When running using `pip install` the static directory is in the site packages.
if not FONTS_DIR.exists():
FONTS_DIR = Path(site.getsitepackages()[0]) / "static/fonts"
# When running using `pip install -e .` the static directory is in the site packages.
# This path only works if `open-webui serve` is run from the root of this project.
if not FONTS_DIR.exists():
FONTS_DIR = Path("./backend/static/fonts")
pdf.add_font("NotoSans", "", f"{FONTS_DIR}/NotoSans-Regular.ttf")
pdf.add_font("NotoSans", "b", f"{FONTS_DIR}/NotoSans-Bold.ttf")
pdf.add_font("NotoSans", "i", f"{FONTS_DIR}/NotoSans-Italic.ttf")
pdf.add_font("NotoSansKR", "", f"{FONTS_DIR}/NotoSansKR-Regular.ttf")
pdf.add_font("NotoSansJP", "", f"{FONTS_DIR}/NotoSansJP-Regular.ttf")
pdf.add_font("NotoSansSC", "", f"{FONTS_DIR}/NotoSansSC-Regular.ttf")
pdf.set_font("NotoSans", size=12)
pdf.set_fallback_fonts(["NotoSansKR", "NotoSansJP", "NotoSansSC"])
pdf.set_auto_page_break(auto=True, margin=15)
# Build HTML messages
messages_html_list: List[str] = [
self._build_html_message(msg) for msg in self.form_data.messages
]
self.messages_html = "<div>" + "".join(messages_html_list) + "</div>"
# Generate full HTML body
self.html_body = self._generate_html_body()
pdf.write_html(self.html_body)
# Save the pdf with name .pdf
pdf_bytes = pdf.output()
return bytes(pdf_bytes)
except Exception as e:
raise e
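A hedged usage sketch of the new generator (not part of the diff), assuming it runs inside the Open WebUI backend environment where the static assets and fonts referenced above are available; the title, messages, and output path are made up:

```
from open_webui.apps.webui.models.chats import ChatTitleMessagesForm
from open_webui.utils.pdf_generator import PDFGenerator

form_data = ChatTitleMessagesForm(
    title="Example chat",
    messages=[
        {"role": "user", "content": "Hello!", "timestamp": 1728892800.0},
        {"role": "assistant", "content": "Hi, **how can I help?**", "model": "llama3"},
    ],
)

# Markdown in each message is rendered to HTML, styled by pdf-style.css, and
# written out through fpdf2's write_html().
with open("chat.pdf", "wb") as f:
    f.write(PDFGenerator(form_data).generate_chat_pdf())
```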

View File

@ -44,13 +44,15 @@ chromadb==0.5.9
pymilvus==2.4.7
qdrant-client~=1.12.0
sentence-transformers==3.0.1
sentence-transformers==3.2.0
colbert-ai==0.2.21
einops==0.8.0
ftfy==6.2.3
pypdf==4.3.1
xhtml2pdf==0.2.16
pymdown-extensions==10.11.2
docx2txt==0.8
python-pptx==1.0.0
unstructured==0.15.9

View File

@ -118,7 +118,7 @@ Navigate to the apache sites-available directory:
`nano models.server.city.conf` # match this with your ollama server domain
Add the folloing virtualhost containing this example (modify as needed):
Add the following virtualhost containing this example (modify as needed):
```

View File

@ -50,13 +50,15 @@ dependencies = [
"chromadb==0.5.9",
"pymilvus==2.4.7",
"sentence-transformers==3.0.1",
"sentence-transformers==3.2.0",
"colbert-ai==0.2.21",
"einops==0.8.0",
"ftfy==6.2.3",
"pypdf==4.3.1",
"xhtml2pdf==0.2.16",
"pymdown-extensions==10.11.2",
"docx2txt==0.8",
"python-pptx==1.0.0",
"unstructured==0.15.9",

View File

@ -26,6 +26,8 @@
import ResetVectorDBConfirmDialog from '$lib/components/common/ConfirmDialog.svelte';
import SensitiveInput from '$lib/components/common/SensitiveInput.svelte';
import Tooltip from '$lib/components/common/Tooltip.svelte';
import Switch from '$lib/components/common/Switch.svelte';
import { text } from '@sveltejs/kit';
const i18n = getContext('i18n');
@ -48,6 +50,7 @@
let tikaServerUrl = '';
let showTikaServerUrl = false;
let textSplitter = '';
let chunkSize = 0;
let chunkOverlap = 0;
let pdfExtractImages = true;
@ -177,6 +180,7 @@
max_count: fileMaxCount === '' ? null : fileMaxCount
},
chunk: {
text_splitter: textSplitter,
chunk_overlap: chunkOverlap,
chunk_size: chunkSize
},
@ -222,11 +226,13 @@
await setRerankingConfig();
querySettings = await getQuerySettings(localStorage.token);
const res = await getRAGConfig(localStorage.token);
if (res) {
pdfExtractImages = res.pdf_extract_images;
textSplitter = res.chunk.text_splitter;
chunkSize = res.chunk.chunk_size;
chunkOverlap = res.chunk.chunk_overlap;
@ -535,13 +541,13 @@
<hr class=" dark:border-gray-850" />
<div class="">
<div class="text-sm font-medium">{$i18n.t('Content Extraction')}</div>
<div class="text-sm font-medium mb-1">{$i18n.t('Content Extraction')}</div>
<div class="flex w-full justify-between mt-2">
<div class="flex w-full justify-between">
<div class="self-center text-xs font-medium">{$i18n.t('Engine')}</div>
<div class="flex items-center relative">
<select
class="dark:bg-gray-900 w-fit pr-8 rounded px-2 p-1 text-xs bg-transparent outline-none text-right"
class="dark:bg-gray-900 w-fit pr-8 rounded px-2 text-xs bg-transparent outline-none text-right"
bind:value={contentExtractionEngine}
on:change={(e) => {
showTikaServerUrl = e.target.value === 'tika';
@ -554,7 +560,7 @@
</div>
{#if showTikaServerUrl}
<div class="flex w-full mt-2">
<div class="flex w-full mt-1">
<div class="flex-1 mr-2">
<input
class="w-full rounded-lg py-2 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
@ -568,10 +574,139 @@
<hr class=" dark:border-gray-850" />
<div class="">
<div class="text-sm font-medium">{$i18n.t('Files')}</div>
<div class=" ">
<div class=" text-sm font-medium mb-1">{$i18n.t('Query Params')}</div>
<div class=" my-2 flex gap-1.5">
<div class=" flex gap-1.5">
<div class="flex flex-col w-full gap-1">
<div class=" text-xs font-medium w-full">{$i18n.t('Top K')}</div>
<div class="w-full">
<input
class=" w-full rounded-lg py-1.5 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
placeholder={$i18n.t('Enter Top K')}
bind:value={querySettings.k}
autocomplete="off"
min="0"
/>
</div>
</div>
{#if querySettings.hybrid === true}
<div class=" flex flex-col w-full gap-1">
<div class="text-xs font-medium w-full">
{$i18n.t('Minimum Score')}
</div>
<div class="w-full">
<input
class=" w-full rounded-lg py-1.5 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
step="0.01"
placeholder={$i18n.t('Enter Score')}
bind:value={querySettings.r}
autocomplete="off"
min="0.0"
title={$i18n.t('The score should be a value between 0.0 (0%) and 1.0 (100%).')}
/>
</div>
</div>
{/if}
</div>
{#if querySettings.hybrid === true}
<div class="mt-2 text-xs text-gray-400 dark:text-gray-500">
{$i18n.t(
'Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.'
)}
</div>
{/if}
<div class="mt-2">
<div class=" mb-1 text-xs font-medium">{$i18n.t('RAG Template')}</div>
<Tooltip
content={$i18n.t('Leave empty to use the default prompt, or enter a custom prompt')}
placement="top-start"
>
<textarea
bind:value={querySettings.template}
placeholder={$i18n.t('Leave empty to use the default prompt, or enter a custom prompt')}
class="w-full rounded-lg px-4 py-3 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none resize-none"
rows="4"
/>
</Tooltip>
</div>
</div>
<hr class=" dark:border-gray-850" />
<div class=" ">
<div class="mb-1 text-sm font-medium">{$i18n.t('Chunk Params')}</div>
<div class="flex w-full justify-between mb-1.5">
<div class="self-center text-xs font-medium">{$i18n.t('Text Splitter')}</div>
<div class="flex items-center relative">
<select
class="dark:bg-gray-900 w-fit pr-8 rounded px-2 text-xs bg-transparent outline-none text-right"
bind:value={textSplitter}
>
<option value="">{$i18n.t('Default (Character)')} </option>
<option value="token">{$i18n.t('Token (Tiktoken)')}</option>
</select>
</div>
</div>
<div class=" flex gap-1.5">
<div class=" w-full justify-between">
<div class="self-center text-xs font-medium min-w-fit mb-1">{$i18n.t('Chunk Size')}</div>
<div class="self-center">
<input
class=" w-full rounded-lg py-1.5 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
placeholder={$i18n.t('Enter Chunk Size')}
bind:value={chunkSize}
autocomplete="off"
min="0"
/>
</div>
</div>
<div class="w-full">
<div class=" self-center text-xs font-medium min-w-fit mb-1">
{$i18n.t('Chunk Overlap')}
</div>
<div class="self-center">
<input
class="w-full rounded-lg py-1.5 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
placeholder={$i18n.t('Enter Chunk Overlap')}
bind:value={chunkOverlap}
autocomplete="off"
min="0"
/>
</div>
</div>
</div>
<div class="my-2">
<div class="flex justify-between items-center text-xs">
<div class=" text-xs font-medium">{$i18n.t('PDF Extract Images (OCR)')}</div>
<div>
<Switch bind:state={pdfExtractImages} />
</div>
</div>
</div>
</div>
<hr class=" dark:border-gray-850" />
<div class="">
<div class="text-sm font-medium mb-1">{$i18n.t('Files')}</div>
<div class=" flex gap-1.5">
<div class="w-full">
<div class=" self-center text-xs font-medium min-w-fit mb-1">
{$i18n.t('Max Upload Size')}
@ -623,128 +758,6 @@
<hr class=" dark:border-gray-850" />
<div class=" ">
<div class=" text-sm font-medium">{$i18n.t('Query Params')}</div>
<div class=" flex gap-1">
<div class=" flex w-full justify-between">
<div class="self-center text-xs font-medium min-w-fit">{$i18n.t('Top K')}</div>
<div class="self-center p-3">
<input
class=" w-full rounded-lg py-1.5 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
placeholder={$i18n.t('Enter Top K')}
bind:value={querySettings.k}
autocomplete="off"
min="0"
/>
</div>
</div>
{#if querySettings.hybrid === true}
<div class=" flex w-full justify-between">
<div class=" self-center text-xs font-medium min-w-fit">
{$i18n.t('Minimum Score')}
</div>
<div class="self-center p-3">
<input
class=" w-full rounded-lg py-1.5 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
step="0.01"
placeholder={$i18n.t('Enter Score')}
bind:value={querySettings.r}
autocomplete="off"
min="0.0"
title={$i18n.t('The score should be a value between 0.0 (0%) and 1.0 (100%).')}
/>
</div>
</div>
{/if}
</div>
{#if querySettings.hybrid === true}
<div class="mt-2 mb-1 text-xs text-gray-400 dark:text-gray-500">
{$i18n.t(
'Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.'
)}
</div>
<hr class=" dark:border-gray-850 my-3" />
{/if}
<div>
<div class=" mb-2.5 text-sm font-medium">{$i18n.t('RAG Template')}</div>
<Tooltip
content={$i18n.t('Leave empty to use the default prompt, or enter a custom prompt')}
placement="top-start"
>
<textarea
bind:value={querySettings.template}
placeholder={$i18n.t('Leave empty to use the default prompt, or enter a custom prompt')}
class="w-full rounded-lg px-4 py-3 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none resize-none"
rows="4"
/>
</Tooltip>
</div>
</div>
<hr class=" dark:border-gray-850" />
<div class=" ">
<div class=" text-sm font-medium">{$i18n.t('Chunk Params')}</div>
<div class=" my-2 flex gap-1.5">
<div class=" w-full justify-between">
<div class="self-center text-xs font-medium min-w-fit mb-1">{$i18n.t('Chunk Size')}</div>
<div class="self-center">
<input
class=" w-full rounded-lg py-1.5 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
placeholder={$i18n.t('Enter Chunk Size')}
bind:value={chunkSize}
autocomplete="off"
min="0"
/>
</div>
</div>
<div class="w-full">
<div class=" self-center text-xs font-medium min-w-fit mb-1">
{$i18n.t('Chunk Overlap')}
</div>
<div class="self-center">
<input
class="w-full rounded-lg py-1.5 px-4 text-sm bg-gray-50 dark:text-gray-300 dark:bg-gray-850 outline-none"
type="number"
placeholder={$i18n.t('Enter Chunk Overlap')}
bind:value={chunkOverlap}
autocomplete="off"
min="0"
/>
</div>
</div>
</div>
<div class="my-3">
<div class="flex justify-between items-center text-xs">
<div class=" text-xs font-medium">{$i18n.t('PDF Extract Images (OCR)')}</div>
<button
class=" text-xs font-medium text-gray-500"
type="button"
on:click={() => {
pdfExtractImages = !pdfExtractImages;
}}>{pdfExtractImages ? $i18n.t('On') : $i18n.t('Off')}</button
>
</div>
</div>
</div>
<hr class=" dark:border-gray-850" />
<div>
<button
class=" flex rounded-xl py-2 px-3.5 w-full hover:bg-gray-200 dark:hover:bg-gray-800 transition"
@ -794,7 +807,9 @@
/>
</svg>
</div>
<div class=" self-center text-sm font-medium">{$i18n.t('Reset Vector Storage')}</div>
<div class=" self-center text-sm font-medium">
{$i18n.t('Reset Vector Storage/Knowledge')}
</div>
</button>
</div>
</div>

View File

@ -24,9 +24,17 @@
}
};
let chatDeletion = true;
let chatEdit = true;
let chatTemporary = true;
onMount(async () => {
permissions = await getUserPermissions(localStorage.token);
chatDeletion = permissions?.chat?.deletion ?? true;
chatEdit = permissions?.chat?.editing ?? true;
chatTemporary = permissions?.chat?.temporary ?? true;
const res = await getModelFilterConfig(localStorage.token);
if (res) {
whitelistEnabled = res.enabled;
@ -43,7 +51,13 @@
// console.log('submit');
await setDefaultModels(localStorage.token, defaultModelId);
await updateUserPermissions(localStorage.token, permissions);
await updateUserPermissions(localStorage.token, {
chat: {
deletion: chatDeletion,
editing: chatEdit,
temporary: chatTemporary
}
});
await updateModelFilterConfig(localStorage.token, whitelistEnabled, whitelistModels);
saveHandler();
@ -54,127 +68,22 @@
<div>
<div class=" mb-2 text-sm font-medium">{$i18n.t('User Permissions')}</div>
<div class=" flex w-full justify-between">
<div class=" flex w-full justify-between my-2 pr-2">
<div class=" self-center text-xs font-medium">{$i18n.t('Allow Chat Deletion')}</div>
<button
class="p-1 px-3 text-xs flex rounded transition"
on:click={() => {
permissions.chat.deletion = !(permissions?.chat?.deletion ?? true);
}}
type="button"
>
{#if permissions?.chat?.deletion ?? true}
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
d="M11.5 1A3.5 3.5 0 0 0 8 4.5V7H2.5A1.5 1.5 0 0 0 1 8.5v5A1.5 1.5 0 0 0 2.5 15h7a1.5 1.5 0 0 0 1.5-1.5v-5A1.5 1.5 0 0 0 9.5 7V4.5a2 2 0 1 1 4 0v1.75a.75.75 0 0 0 1.5 0V4.5A3.5 3.5 0 0 0 11.5 1Z"
/>
</svg>
<span class="ml-2 self-center">{$i18n.t('Allow')}</span>
{:else}
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
fill-rule="evenodd"
d="M8 1a3.5 3.5 0 0 0-3.5 3.5V7A1.5 1.5 0 0 0 3 8.5v5A1.5 1.5 0 0 0 4.5 15h7a1.5 1.5 0 0 0 1.5-1.5v-5A1.5 1.5 0 0 0 11.5 7V4.5A3.5 3.5 0 0 0 8 1Zm2 6V4.5a2 2 0 1 0-4 0V7h4Z"
clip-rule="evenodd"
/>
</svg>
<span class="ml-2 self-center">{$i18n.t("Don't Allow")}</span>
{/if}
</button>
<Switch bind:state={chatDeletion} />
</div>
<div class=" flex w-full justify-between">
<div class=" flex w-full justify-between my-2 pr-2">
<div class=" self-center text-xs font-medium">{$i18n.t('Allow Chat Editing')}</div>
<button
class="p-1 px-3 text-xs flex rounded transition"
on:click={() => {
permissions.chat.editing = !(permissions?.chat?.editing ?? true);
}}
type="button"
>
{#if permissions?.chat?.editing ?? true}
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
d="M11.5 1A3.5 3.5 0 0 0 8 4.5V7H2.5A1.5 1.5 0 0 0 1 8.5v5A1.5 1.5 0 0 0 2.5 15h7a1.5 1.5 0 0 0 1.5-1.5v-5A1.5 1.5 0 0 0 9.5 7V4.5a2 2 0 1 1 4 0v1.75a.75.75 0 0 0 1.5 0V4.5A3.5 3.5 0 0 0 11.5 1Z"
/>
</svg>
<span class="ml-2 self-center">{$i18n.t('Allow')}</span>
{:else}
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
fill-rule="evenodd"
d="M8 1a3.5 3.5 0 0 0-3.5 3.5V7A1.5 1.5 0 0 0 3 8.5v5A1.5 1.5 0 0 0 4.5 15h7a1.5 1.5 0 0 0 1.5-1.5v-5A1.5 1.5 0 0 0 11.5 7V4.5A3.5 3.5 0 0 0 8 1Zm2 6V4.5a2 2 0 1 0-4 0V7h4Z"
clip-rule="evenodd"
/>
</svg>
<span class="ml-2 self-center">{$i18n.t("Don't Allow")}</span>
{/if}
</button>
<Switch bind:state={chatEdit} />
</div>
<div class=" flex w-full justify-between">
<div class=" flex w-full justify-between my-2 pr-2">
<div class=" self-center text-xs font-medium">{$i18n.t('Allow Temporary Chat')}</div>
<button
class="p-1 px-3 text-xs flex rounded transition"
on:click={() => {
permissions.chat.temporary = !(permissions?.chat?.temporary ?? true);
}}
type="button"
>
{#if permissions?.chat?.temporary ?? true}
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
d="M11.5 1A3.5 3.5 0 0 0 8 4.5V7H2.5A1.5 1.5 0 0 0 1 8.5v5A1.5 1.5 0 0 0 2.5 15h7a1.5 1.5 0 0 0 1.5-1.5v-5A1.5 1.5 0 0 0 9.5 7V4.5a2 2 0 1 1 4 0v1.75a.75.75 0 0 0 1.5 0V4.5A3.5 3.5 0 0 0 11.5 1Z"
/>
</svg>
<span class="ml-2 self-center">{$i18n.t('Allow')}</span>
{:else}
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 16 16"
fill="currentColor"
class="w-4 h-4"
>
<path
fill-rule="evenodd"
d="M8 1a3.5 3.5 0 0 0-3.5 3.5V7A1.5 1.5 0 0 0 3 8.5v5A1.5 1.5 0 0 0 4.5 15h7a1.5 1.5 0 0 0 1.5-1.5v-5A1.5 1.5 0 0 0 11.5 7V4.5A3.5 3.5 0 0 0 8 1Zm2 6V4.5a2 2 0 1 0-4 0V7h4Z"
clip-rule="evenodd"
/>
</svg>
<span class="ml-2 self-center">{$i18n.t("Don't Allow")}</span>
{/if}
</button>
<Switch bind:state={chatTemporary} />
</div>
</div>
@ -210,7 +119,7 @@
<div class=" space-y-1">
<div class="mb-2">
<div class="flex justify-between items-center text-xs">
<div class="flex justify-between items-center text-xs my-3 pr-2">
<div class=" text-xs font-medium">{$i18n.t('Model Whitelisting')}</div>
<Switch bind:state={whitelistEnabled} />

View File

@ -175,10 +175,30 @@
message.statusHistory = [data];
}
} else if (type === 'citation') {
if (message?.citations) {
message.citations.push(data);
if (data?.type === 'code_execution') {
// Code execution; update existing code execution by ID, or add new one.
if (!message?.code_executions) {
message.code_executions = [];
}
const existingCodeExecutionIndex = message.code_executions.findIndex(
(execution) => execution.id === data.id
);
if (existingCodeExecutionIndex !== -1) {
message.code_executions[existingCodeExecutionIndex] = data;
} else {
message.code_executions.push(data);
}
message.code_executions = message.code_executions;
} else {
message.citations = [data];
// Regular citation.
if (message?.citations) {
message.citations.push(data);
} else {
message.citations = [data];
}
}
} else if (type === 'message') {
message.content += data.content;

View File

@ -159,7 +159,7 @@
{#if filteredItems.length > 0 || prompt.split(' ')?.at(0)?.substring(1).startsWith('http')}
<div
id="commands-container"
class="pl-8 pr-16 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
class="pl-3 pr-14 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
>
<div class="flex w-full rounded-xl border border-gray-50 dark:border-gray-850">
<div

View File

@ -68,7 +68,7 @@
{#if filteredItems.length > 0}
<div
id="commands-container"
class="pl-8 pr-16 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
class="pl-3 pr-14 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
>
<div class="flex w-full rounded-xl border border-gray-50 dark:border-gray-850">
<div

View File

@ -132,7 +132,7 @@
{#if filteredPrompts.length > 0}
<div
id="commands-container"
class="pl-8 pr-16 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
class="pl-3 pr-14 mb-3 text-left w-full absolute bottom-0 left-0 right-0 z-10"
>
<div class="flex w-full rounded-xl border border-gray-50 dark:border-gray-850">
<div

View File

@ -18,12 +18,18 @@
const dispatch = createEventDispatcher();
export let id = '';
export let save = false;
export let run = true;
export let token;
export let lang = '';
export let code = '';
export let className = 'my-2';
export let editorClassName = '';
export let stickyButtonsClassName = 'top-8';
let _code = '';
$: if (code) {
updateCode();
@ -126,7 +132,7 @@
}
},
stderr: (text) => {
console.log('An error occured:', text);
console.log('An error occurred:', text);
if (stderr) {
stderr += `${text}\n`;
} else {
@ -296,7 +302,7 @@ __builtins__.input = input`);
</script>
<div>
<div class="relative my-2 flex flex-col rounded-lg" dir="ltr">
<div class="relative {className} flex flex-col rounded-lg" dir="ltr">
{#if lang === 'mermaid'}
{#if mermaidHtml}
<SvgPanZoom
@ -313,13 +319,13 @@ __builtins__.input = input`);
</div>
<div
class="sticky top-8 mb-1 py-1 pr-2.5 flex items-center justify-end z-10 text-xs text-black dark:text-white"
class="sticky {stickyButtonsClassName} mb-1 py-1 pr-2.5 flex items-center justify-end z-10 text-xs text-black dark:text-white"
>
<div class="flex items-center gap-0.5 translate-y-[1px]">
{#if lang.toLowerCase() === 'python' || lang.toLowerCase() === 'py' || (lang === '' && checkPythonCode(code))}
{#if executing}
<div class="run-code-button bg-none border-none p-1 cursor-not-allowed">Running</div>
{:else}
{:else if run}
<button
class="run-code-button bg-none border-none bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-md px-1.5 py-0.5"
on:click={async () => {
@ -348,9 +354,11 @@ __builtins__.input = input`);
</div>
<div
class="language-{lang} rounded-t-lg -mt-8 {executing || stdout || stderr || result
? ''
: 'rounded-b-lg'} overflow-hidden"
class="language-{lang} rounded-t-lg -mt-8 {editorClassName
? editorClassName
: executing || stdout || stderr || result
? ''
: 'rounded-b-lg'} overflow-hidden"
>
<div class=" pt-7 bg-gray-50 dark:bg-gray-850"></div>
<CodeEditor

View File

@ -0,0 +1,118 @@
<script lang="ts">
import { getContext } from 'svelte';
import CodeBlock from './CodeBlock.svelte';
import Modal from '$lib/components/common/Modal.svelte';
import Spinner from '$lib/components/common/Spinner.svelte';
import Badge from '$lib/components/common/Badge.svelte';
const i18n = getContext('i18n');
export let show = false;
export let codeExecution = null;
</script>
<Modal size="lg" bind:show>
<div>
<div class="flex justify-between dark:text-gray-300 px-5 pt-4 pb-2">
<div class="text-lg font-medium self-center flex flex-col gap-0.5 capitalize">
{#if codeExecution?.result}
<div>
{#if codeExecution.result?.error}
<Badge type="error" content="error" />
{:else if codeExecution.result?.output}
<Badge type="success" content="success" />
{:else}
<Badge type="warning" content="incomplete" />
{/if}
</div>
{/if}
<div class="flex gap-2 items-center">
{#if !codeExecution?.result}
<div>
<Spinner className="size-4" />
</div>
{/if}
<div>
{#if codeExecution?.name}
{$i18n.t('Code execution')}: {codeExecution?.name}
{:else}
{$i18n.t('Code execution')}
{/if}
</div>
</div>
</div>
<button
class="self-center"
on:click={() => {
show = false;
codeExecution = null;
}}
>
<svg
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 20 20"
fill="currentColor"
class="w-5 h-5"
>
<path
d="M6.28 5.22a.75.75 0 00-1.06 1.06L8.94 10l-3.72 3.72a.75.75 0 101.06 1.06L10 11.06l3.72 3.72a.75.75 0 101.06-1.06L11.06 10l3.72-3.72a.75.75 0 00-1.06-1.06L10 8.94 6.28 5.22z"
/>
</svg>
</button>
</div>
<div class="flex flex-col md:flex-row w-full px-4 pb-5">
<div
class="flex flex-col w-full dark:text-gray-200 overflow-y-scroll max-h-[22rem] scrollbar-hidden"
>
<div class="flex flex-col w-full">
<CodeBlock
id="code-exec-{codeExecution?.id}-code"
lang={codeExecution?.language ?? ''}
code={codeExecution?.code ?? ''}
className=""
editorClassName={codeExecution?.result &&
(codeExecution?.result?.error || codeExecution?.result?.output)
? 'rounded-b-none'
: ''}
stickyButtonsClassName="top-0"
run={false}
/>
</div>
{#if codeExecution?.result && (codeExecution?.result?.error || codeExecution?.result?.output)}
<div class="dark:bg-[#202123] dark:text-white px-4 py-4 rounded-b-lg flex flex-col gap-3">
{#if codeExecution?.result?.error}
<div>
<div class=" text-gray-500 text-xs mb-1">{$i18n.t('ERROR')}</div>
<div class="text-sm">{codeExecution?.result?.error}</div>
</div>
{/if}
{#if codeExecution?.result?.output}
<div>
<div class=" text-gray-500 text-xs mb-1">{$i18n.t('OUTPUT')}</div>
<div class="text-sm">{codeExecution?.result?.output}</div>
</div>
{/if}
</div>
{/if}
{#if codeExecution?.result?.files && codeExecution?.result?.files.length > 0}
<div class="flex flex-col w-full">
<hr class=" dark:border-gray-850 my-2" />
<div class=" text-sm font-medium dark:text-gray-300">
{$i18n.t('Files')}
</div>
<ul class="mt-1 list-disc pl-4 text-xs">
{#each codeExecution?.result?.files as file}
<li>
<a href={file.url} target="_blank">{file.name}</a>
</li>
{/each}
</ul>
</div>
{/if}
</div>
</div>
</div>
</Modal>

View File

@ -0,0 +1,80 @@
<script lang="ts">
import CodeExecutionModal from './CodeExecutionModal.svelte';
import Spinner from '$lib/components/common/Spinner.svelte';
import Check from '$lib/components/icons/Check.svelte';
import XMark from '$lib/components/icons/XMark.svelte';
import EllipsisHorizontal from '$lib/components/icons/EllipsisHorizontal.svelte';
export let codeExecutions = [];
let selectedCodeExecution = null;
let showCodeExecutionModal = false;
$: if (codeExecutions) {
updateSelectedCodeExecution();
}
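// Re-resolve the selected execution by id so an open modal picks up streamed-in results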
const updateSelectedCodeExecution = () => {
if (selectedCodeExecution) {
selectedCodeExecution = codeExecutions.find(
(execution) => execution.id === selectedCodeExecution.id
);
}
};
</script>
<CodeExecutionModal bind:show={showCodeExecutionModal} codeExecution={selectedCodeExecution} />
{#if codeExecutions.length > 0}
<div class="mt-1 mb-2 w-full flex gap-1 items-center flex-wrap">
{#each codeExecutions as execution (execution.id)}
<div class="flex gap-1 text-xs font-semibold">
<button
class="flex dark:text-gray-300 py-1 px-1 bg-gray-50 hover:bg-gray-100 dark:bg-gray-850 dark:hover:bg-gray-800 transition rounded-xl max-w-96"
on:click={() => {
selectedCodeExecution = execution;
showCodeExecutionModal = true;
}}
>
<div
class="bg-white dark:bg-gray-700 rounded-full size-4 flex items-center justify-center"
>
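<!-- Status icon: X for error, check for output, ellipsis for an empty result, spinner while running -->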
{#if execution?.result}
{#if execution.result?.error}
<XMark />
{:else if execution.result?.output}
<Check strokeWidth="3" className="size-3" />
{:else}
<EllipsisHorizontal />
{/if}
{:else}
<Spinner className="size-4" />
{/if}
</div>
<div
class="flex-1 mx-2 line-clamp-1 code-execution-name {execution?.result ? '' : 'pulse'}"
>
{execution.name}
</div>
</button>
</div>
{/each}
</div>
{/if}
<style>
@keyframes pulse {
0%,
100% {
opacity: 1;
}
50% {
opacity: 0.6;
}
}
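/* Softly pulse the execution name while its result is pending */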
.pulse {
opacity: 1;
animation: pulse 1.5s ease infinite;
}
</style>

View File

@ -35,6 +35,7 @@
import Markdown from './Markdown.svelte';
import Error from './Error.svelte';
import Citations from './Citations.svelte';
import CodeExecutions from './CodeExecutions.svelte';
import type { Writable } from 'svelte/store';
import type { i18n as i18nType } from 'i18next';
@ -64,6 +65,17 @@
done: boolean;
error?: boolean | { content: string };
citations?: string[];
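// Code execution attachments for this message, rendered below the content by CodeExecutions.svelte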
code_executions?: {
uuid: string;
name: string;
code: string;
language?: string;
result?: {
error?: string;
output?: string;
files?: { name: string; url: string }[];
};
}[];
info?: {
openai?: boolean;
prompt_tokens?: number;
@ -516,6 +528,10 @@
{#if message.citations}
<Citations citations={message.citations} />
{/if}
{#if message.code_executions}
<CodeExecutions codeExecutions={message.code_executions} />
{/if}
</div>
{/if}
</div>

View File

@ -198,7 +198,7 @@
saveAs(blob, `${model.id}-${Date.now()}.json`);
};
const positionChangeHanlder = async () => {
const positionChangeHandler = async () => {
// Get the new order of the models
const modelIds = Array.from(document.getElementById('model-list').children).map((child) =>
child.id.replace('model-item-', '')
@ -248,7 +248,7 @@
animation: 150,
onUpdate: async (event) => {
console.log(event);
positionChangeHanlder();
positionChangeHandler();
}
});
}

View File

@ -1,7 +1,7 @@
{
"'s', 'm', 'h', 'd', 'w' or '-1' for no expiration.": "'s', 'm', 'h', 'd', 'w' または '-1' で無期限。",
"(Beta)": "(ベータ版)",
"(e.g. `sh webui.sh --api --api-auth username_password`)": "",
"(e.g. `sh webui.sh --api --api-auth username_password`)": "(例: `sh webui.sh --api --api-auth username_password`)",
"(e.g. `sh webui.sh --api`)": "(例: `sh webui.sh --api`)",
"(latest)": "(最新)",
"{{ models }}": "{{ モデル }}",
@ -9,13 +9,13 @@
"{{user}}'s Chats": "{{user}} のチャット",
"{{webUIName}} Backend Required": "{{webUIName}} バックエンドが必要です",
"*Prompt node ID(s) are required for image generation": "",
"A new version (v{{LATEST_VERSION}}) is now available.": "新しいバージョンがReleaseされています。",
"A task model is used when performing tasks such as generating titles for chats and web search queries": "タスクモデルは、チャットやWeb検索クエリのタイトルの生成などのタスクを実行するときに使用されます",
"A new version (v{{LATEST_VERSION}}) is now available.": "新しいバージョンが利用可能です。",
"A task model is used when performing tasks such as generating titles for chats and web search queries": "タスクモデルは、チャットやウェブ検索クエリのタイトルの生成などのタスクを実行するときに使用されます",
"a user": "ユーザー",
"About": "概要",
"Account": "アカウント",
"Account Activation Pending": "アカウント承認待ち",
"Accurate information": "情報の正確性",
"Accurate information": "情報が正確",
"Actions": "アクション",
"Active Users": "アクティブユーザー",
"Add": "追加",
@ -50,9 +50,9 @@
"Allow Chat Deletion": "チャットの削除を許可",
"Allow Chat Editing": "チャットの編集を許可",
"Allow non-local voices": "ローカル以外のボイスを許可",
"Allow Temporary Chat": "チャットの一時許可",
"Allow Temporary Chat": "一時的なチャットを許可",
"Allow User Location": "ユーザーロケーションの許可",
"Allow Voice Interruption in Call": "音声の割り込みを許可",
"Allow Voice Interruption in Call": "通話中に音声の割り込みを許可",
"alphanumeric characters and hyphens": "英数字とハイフン",
"Already have an account?": "すでにアカウントをお持ちですか?",
"an assistant": "アシスタント",
@ -86,8 +86,8 @@
"Back": "戻る",
"Bad Response": "応答が悪い",
"Banners": "バナー",
"Base Model (From)": "ベースモデル(From)",
"Batch Size (num_batch)": "バッチサイズ",
"Base Model (From)": "ベースモデル (From)",
"Batch Size (num_batch)": "バッチサイズ (num_batch)",
"before": "より前",
"Being lazy": "怠惰な",
"Brave Search API Key": "Brave Search APIキー",
@ -99,7 +99,7 @@
"Capabilities": "資格",
"Change Password": "パスワードを変更",
"Chat": "チャット",
"Chat Background Image": "チャットバックグラウンドイメージ",
"Chat Background Image": "チャットの背景画像",
"Chat Bubble UI": "チャットバブルUI",
"Chat Controls": "チャットコントロール",
"Chat direction": "チャットの方向",
@ -113,21 +113,21 @@
"Chunk Params": "チャンクパラメーター",
"Chunk Size": "チャンクサイズ",
"Citation": "引用文",
"Clear memory": "メモリクリア",
"Clear memory": "メモリクリア",
"Click here for help.": "ヘルプについてはここをクリックしてください。",
"Click here to": "ここをクリックして",
"Click here to download user import template file.": "ユーザーテンプレートをインポートするにはここをクリック",
"Click here to download user import template file.": "ユーザーテンプレートをインポートするにはここをクリックしてください。",
"Click here to select": "選択するにはここをクリックしてください",
"Click here to select a csv file.": "CSVファイルを選択するにはここをクリックしてください。",
"Click here to select a py file.": "Pythonスクリプトファイルを選択するにはここをクリック",
"Click here to select a py file.": "Pythonスクリプトファイルを選択するにはここをクリックしてください。",
"Click here to select documents.": "ドキュメントを選択するにはここをクリックしてください。",
"Click here to upload a workflow.json file.": "workflow.jsonファイルをアップロードするにはここをクリック。",
"Click here to upload a workflow.json file.": "workflow.jsonファイルをアップロードするにはここをクリックしてください。",
"click here.": "ここをクリックしてください。",
"Click on the user role button to change a user's role.": "ユーザーの役割を変更するには、ユーザー役割ボタンをクリックしてください。",
"Clipboard write permission denied. Please check your browser settings to grant the necessary access.": "クリップボードへの書き込み許可がありません。ブラウザ設定を確認し許可してください。",
"Clone": "クローン",
"Close": "閉じる",
"Code formatted successfully": "コードフォーマット完了",
"Code formatted successfully": "コードフォーマットに成功しました",
"Collection": "コレクション",
"ComfyUI": "ComfyUI",
"ComfyUI Base URL": "ComfyUIベースURL",
@ -135,14 +135,14 @@
"ComfyUI Workflow": "ComfyUIワークフロー",
"ComfyUI Workflow Nodes": "ComfyUIワークフローード",
"Command": "コマンド",
"Concurrent Requests": "コンカレント要求",
"Concurrent Requests": "同時リクエスト",
"Confirm": "確認",
"Confirm Password": "パスワードを確認",
"Confirm your action": "",
"Confirm your action": "あなたのアクションの確認",
"Connections": "接続",
"Contact Admin for WebUI Access": "WEBUIへの接続について管理者に問い合わせ下さい。",
"Content": "コンテンツ",
"Content Extraction": "",
"Content Extraction": "コンテンツ抽出",
"Context Length": "コンテキストの長さ",
"Continue Response": "続きの応答",
"Continue with {{provider}}": "",
@ -158,7 +158,7 @@
"Copying to clipboard was successful!": "クリップボードへのコピーが成功しました!",
"Create a model": "モデルを作成する",
"Create Account": "アカウントを作成",
"Create Knowledge": "RAG用データ作成",
"Create Knowledge": "知識データ作成",
"Create new key": "新しいキーを作成",
"Create new secret key": "新しいシークレットキーを作成",
"Created at": "作成日時",
@ -198,18 +198,18 @@
"Description": "説明",
"Didn't fully follow instructions": "説明に沿って操作していませんでした",
"Disabled": "無効",
"Discover a function": "",
"Discover a model": "モデルを検出する",
"Discover a function": "Functionを探す",
"Discover a model": "モデルを探す",
"Discover a prompt": "プロンプトを探す",
"Discover a tool": "ツールを探す",
"Discover, download, and explore custom functions": "カスタムFunctionを探しダウンロードする",
"Discover, download, and explore custom prompts": "カスタムプロンプトを見つけて、ダウンロードして、探索",
"Discover, download, and explore custom tools": "カスタムツールを探しダウンロードする",
"Discover, download, and explore model presets": "モデルプリセットを見つけて、ダウンロードして、探索",
"Discover, download, and explore custom functions": "カスタムFunctionを探しダウンロードする",
"Discover, download, and explore custom prompts": "カスタムプロンプトを探してダウンロードする",
"Discover, download, and explore custom tools": "カスタムツールを探しダウンロードする",
"Discover, download, and explore model presets": "モデルプリセットを探してダウンロードする",
"Dismissible": "",
"Display Emoji in Call": "",
"Display Emoji in Call": "コールで絵文字を表示",
"Display the username instead of You in the Chat": "チャットで「あなた」の代わりにユーザー名を表示",
"Do not install functions from sources you do not fully trust.": "信楽出来ないソースからFunctionをインストールしないでください。",
"Do not install functions from sources you do not fully trust.": "信頼できないソースからFunctionをインストールしないでください。",
"Do not install tools from sources you do not fully trust.": "信頼出来ないソースからツールをインストールしないでください。",
"Document": "ドキュメント",
"Documentation": "ドキュメント",
@ -221,14 +221,14 @@
"don't install random tools from sources you don't trust.": "信頼出来ないソースからランダムツールをインストールしないでください。",
"Don't like the style": "デザインが好きでない",
"Done": "完了",
"Download": "ダウンロードをキャンセルしました",
"Download": "ダウンロード",
"Download canceled": "ダウンロードをキャンセルしました",
"Download Database": "データベースをダウンロード",
"Drop any files here to add to the conversation": "会話を追加するには、ここにファイルをドロップしてください",
"e.g. '30s','10m'. Valid time units are 's', 'm', 'h'.": "例: '30秒'、'10分'。有効な時間単位は '秒'、'分'、'時間' です。",
"Edit": "編集",
"Edit Doc": "ドキュメントを編集",
"Edit Memory": "",
"Edit Memory": "メモリを編集",
"Edit User": "ユーザーを編集",
"ElevenLabs": "",
"Email": "メールアドレス",
@ -236,19 +236,19 @@
"Embedding Model": "埋め込みモデル",
"Embedding Model Engine": "埋め込みモデルエンジン",
"Embedding model set to \"{{embedding_model}}\"": "埋め込みモデルを\"{{embedding_model}}\"に設定しました",
"Enable Community Sharing": "コミュニティ共有の有効化",
"Enable Message Rating": "メッセージRatingの有効化",
"Enable New Sign Ups": "新規登録を有効",
"Enable Web Search": "Web 検索を有効にする",
"Enable Web Search Query Generation": "WEBサーチQueryの有効化",
"Enable Community Sharing": "コミュニティ共有を有効にする",
"Enable Message Rating": "メッセージ評価を有効にする",
"Enable New Sign Ups": "新規登録を有効にする",
"Enable Web Search": "ウェブ検索を有効にする",
"Enable Web Search Query Generation": "ウェブ検索クエリ生成を有効にする",
"Enabled": "有効",
"Engine": "エンジン",
"Ensure your CSV file includes 4 columns in this order: Name, Email, Password, Role.": "CSVファイルに4つの列が含まれていることを確認してください: Name, Email, Password, Role.",
"Enter {{role}} message here": "{{role}} メッセージをここに入力してください",
"Enter a detail about yourself for your LLMs to recall": "LLM が記憶するために、自分についての詳細を入力してください",
"Enter api auth string (e.g. username:password)": "API AuthStringを入力(e.g Username:Password)",
"Enter api auth string (e.g. username:password)": "API AuthStringを入力(例: Username:Password)",
"Enter Brave Search API Key": "Brave Search APIキーの入力",
"Enter CFG Scale (e.g. 7.0)": "",
"Enter CFG Scale (e.g. 7.0)": "CFGスケースを入力してください (例: 7.0)",
"Enter Chunk Overlap": "チャンクオーバーラップを入力してください",
"Enter Chunk Size": "チャンクサイズを入力してください",
"Enter Github Raw URL": "Github Raw URLを入力",
@ -277,16 +277,16 @@
"Enter URL (e.g. http://localhost:11434)": "URL を入力してください (例: http://localhost:11434)",
"Enter Your Email": "メールアドレスを入力してください",
"Enter Your Full Name": "フルネームを入力してください",
"Enter your message": "メッセージを入力してください",
"Enter your message": "メッセージを入力してください",
"Enter Your Password": "パスワードを入力してください",
"Enter Your Role": "ロールを入力してください",
"Error": "エラー",
"Experimental": "実験",
"Export": "輸出",
"Experimental": "実験",
"Export": "エクスポート",
"Export All Chats (All Users)": "すべてのチャットをエクスポート (すべてのユーザー)",
"Export chat (.json)": "チャットをエクスポート(.json)",
"Export Chats": "チャットをエクスポート",
"Export Config to JSON File": "JSONファイルのエクスポートConfig",
"Export Config to JSON File": "設定をJSONファイルでエクスポート",
"Export Documents Mapping": "ドキュメントマッピングをエクスポート",
"Export Functions": "Functionのエクスポート",
"Export LiteLLM config.yaml": "",
@ -312,13 +312,13 @@
"Filter is now globally enabled": "グローバルフィルタが有効です。",
"Filters": "フィルター",
"Fingerprint spoofing detected: Unable to use initials as avatar. Defaulting to default profile image.": "指紋のなりすましが検出されました: イニシャルをアバターとして使用できません。デフォルトのプロファイル画像にデフォルト設定されています。",
"Fluidly stream large external response chunks": "大規模な外部応答チャンクを流動的にストリーミングする",
"Fluidly stream large external response chunks": "大規模な外部応答チャンクをスムーズにストリーミングする",
"Focus chat input": "チャット入力をフォーカス",
"Followed instructions perfectly": "完全に指示に従った",
"Form": "フォーム",
"Format your variables using square brackets like this:": "次のように角括弧を使用して変数をフォーマットします。",
"Frequency Penalty": "繰り返しペナルティ",
"Function created successfully": "Functonの作成が成功しました。",
"Frequency Penalty": "頻度ペナルティ",
"Function created successfully": "Functionの作成が成功しました。",
"Function deleted successfully": "Functionの削除が成功しました。",
"Function Description (e.g. A filter to remove profanity from text)": "Function詳細",
"Function ID (e.g. my_filter)": "",
@ -329,7 +329,7 @@
"Functions": "",
"Functions allow arbitrary code execution": "",
"Functions allow arbitrary code execution.": "",
"Functions imported successfully": "",
"Functions imported successfully": "Functionsのインポートが成功しました",
"General": "一般",
"General Settings": "一般設定",
"Generate Image": "",
@ -341,12 +341,12 @@
"Google PSE API Key": "Google PSE APIキー",
"Google PSE Engine Id": "Google PSE エンジン ID",
"h:mm a": "h:mm a",
"Haptic Feedback": "",
"Haptic Feedback": "触覚フィードバック",
"has no conversations.": "対話はありません。",
"Hello, {{name}}": "こんにちは、{{name}} さん",
"Help": "ヘルプ",
"Hide": "非表示",
"Hide Model": "",
"Hide Model": "モデルを隠す",
"How can I help you today?": "今日はどのようにお手伝いしましょうか?",
"Hybrid Search": "ブリッジ検索",
"I acknowledge that I have read and I understand the implications of my action. I am aware of the risks associated with executing arbitrary code and I have verified the trustworthiness of the source.": "",
@ -355,7 +355,7 @@
"Image Settings": "画像設定",
"Images": "画像",
"Import Chats": "チャットをインポート",
"Import Config from JSON File": "",
"Import Config from JSON File": "設定をJSONファイルからインポート",
"Import Documents Mapping": "ドキュメントマッピングをインポート",
"Import Functions": "Functionのインポート",
"Import Models": "モデルのインポート",
@ -379,34 +379,34 @@
"JWT Token": "JWT トークン",
"Keep Alive": "キープアライブ",
"Keyboard shortcuts": "キーボードショートカット",
"Knowledge": "RAGファイル",
"Knowledge created successfully.": "RAGファイル識別タグ作成完了",
"Knowledge deleted successfully.": "RAGファイル識別タグ削除完了",
"Knowledge reset successfully.": "RAGファイルリセット",
"Knowledge updated successfully": "RAGファイルアップデート完了",
"Knowledge": "知識",
"Knowledge created successfully.": "知識の作成に成功しました",
"Knowledge deleted successfully.": "知識の削除に成功しました",
"Knowledge reset successfully.": "知識のリセットに成功しました",
"Knowledge updated successfully": "知識のアップデートに成功しました",
"Landing Page Mode": "ランディングページモード",
"Language": "言語",
"large language models, locally.": "",
"Last Active": "最終アクティブ",
"Last Modified": "",
"Leave empty for unlimited": "",
"Leave empty to use the default prompt, or enter a custom prompt": "",
"Leave empty for unlimited": "空欄なら無制限",
"Leave empty to use the default prompt, or enter a custom prompt": "カスタムプロンプトを入力。空欄ならデフォルトプロンプト",
"Light": "ライト",
"Listening...": "",
"LLMs can make mistakes. Verify important information.": "LLM は間違いを犯す可能性があります。重要な情報を検証してください。",
"Local Models": "",
"Local Models": "ローカルモデル",
"LTR": "LTR",
"Made by OpenWebUI Community": "OpenWebUI コミュニティによって作成",
"Make sure to enclose them with": "必ず次で囲んでください",
"Make sure to export a workflow.json file as API format from ComfyUI.": "",
"Manage": "",
"Manage": "管理",
"Manage Models": "モデルを管理",
"Manage Ollama Models": "Ollama モデルを管理",
"Manage Pipelines": "パイプラインの管理",
"March": "3月",
"Max Tokens (num_predict)": "最大トークン数 (num_predict)",
"Max Upload Count": "",
"Max Upload Size": "",
"Max Upload Count": "最大アップロード数",
"Max Upload Size": "最大アップロードサイズ",
"Maximum of 3 models can be downloaded simultaneously. Please try again later.": "同時にダウンロードできるモデルは最大 3 つです。後でもう一度お試しください。",
"May": "5月",
"Memories accessible by LLMs will be shown here.": "LLM がアクセスできるメモリはここに表示されます。",
@ -452,7 +452,7 @@
"No content to speak": "",
"No file selected": "",
"No HTML, CSS, or JavaScript content found.": "",
"No knowledge found": "",
"No knowledge found": "知識が見つかりません",
"No results found": "結果が見つかりません",
"No search query generated": "検索クエリは生成されません",
"No source available": "使用可能なソースがありません",
@ -463,7 +463,7 @@
"Notifications": "デスクトップ通知",
"November": "11月",
"num_gpu (Ollama)": "",
"num_thread (Ollama)": "num_thread(オラマ)",
"num_thread (Ollama)": "",
"OAuth ID": "",
"October": "10月",
"Off": "オフ",
@ -509,14 +509,14 @@
"Pipeline deleted successfully": "",
"Pipeline downloaded successfully": "",
"Pipelines": "パイプライン",
"Pipelines Not Detected": "",
"Pipelines Not Detected": "パイプラインは検出されませんでした",
"Pipelines Valves": "パイプラインバルブ",
"Plain text (.txt)": "プレーンテキスト (.txt)",
"Playground": "プレイグラウンド",
"Please carefully review the following warnings:": "",
"Please fill in all fields.": "",
"Please select a reason": "",
"Positive attitude": "陽気な態度",
"Positive attitude": "前向きな態度",
"Previous 30 days": "前の30日間",
"Previous 7 days": "前の7日間",
"Profile Image": "プロフィール画像",
@ -532,7 +532,7 @@
"Record voice": "音声を録音",
"Redirecting you to OpenWebUI Community": "OpenWebUI コミュニティにリダイレクトしています",
"Refer to yourself as \"User\" (e.g., \"User is learning Spanish\")": "",
"Refused when it shouldn't have": "許可されないのに許可されました",
"Refused when it shouldn't have": "拒否すべきでないのに拒否した",
"Regenerate": "再生成",
"Release Notes": "リリースノート",
"Remove": "削除",
@ -544,11 +544,11 @@
"Reranking model disabled": "再ランキングモデルが無効です",
"Reranking model set to \"{{reranking_model}}\"": "再ランキングモデルを \"{{reranking_model}}\" に設定しました",
"Reset": "",
"Reset Upload Directory": "",
"Reset Upload Directory": "アップロードディレクトリをリセット",
"Reset Vector Storage": "ベクトルストレージをリセット",
"Response AutoCopy to Clipboard": "クリップボードへの応答の自動コピー",
"Response notifications cannot be activated as the website permissions have been denied. Please visit your browser settings to grant the necessary access.": "",
"Response splitting": "",
"Response splitting": "応答の分割",
"Role": "役割",
"Rosé Pine": "Rosé Pine",
"Rosé Pine Dawn": "Rosé Pine Dawn",
@ -563,17 +563,17 @@
"Save Tag": "",
"Saved": "",
"Saving chat logs directly to your browser's storage is no longer supported. Please take a moment to download and delete your chat logs by clicking the button below. Don't worry, you can easily re-import your chat logs to the backend through": "チャットログをブラウザのストレージに直接保存する機能はサポートされなくなりました。下のボタンをクリックして、チャットログをダウンロードして削除してください。ご心配なく。チャットログは、次の方法でバックエンドに簡単に再インポートできます。",
"Scroll to bottom when switching between branches": "",
"Scroll to bottom when switching between branches": "ブランチの切り替え時にボタンをスクロールする",
"Search": "検索",
"Search a model": "モデルを検索",
"Search Chats": "チャットの検索",
"Search Collection": "Collectionの検索",
"Search Documents": "ドキュメントを検索",
"Search Functions": "Functionの検索",
"Search Knowledge": "RAGファイルの検索",
"Search Knowledge": "知識の検索",
"Search Models": "モデル検索",
"Search Prompts": "プロンプトを検索",
"Search Query Generation Prompt": "Query生成プロンプトの検索",
"Search Query Generation Prompt": "検索クエリ生成プロンプト",
"Search Result Count": "検索結果数",
"Search Tools": "ツールの検索",
"SearchApi API Key": "SearchApiのAPIKey",
@ -594,7 +594,7 @@
"Select a tool": "ツールの選択",
"Select an Ollama instance": "Ollama インスタンスを選択",
"Select Engine": "エンジンの選択",
"Select Knowledge": "RAGデータの選択",
"Select Knowledge": "知識の選択",
"Select model": "モデルを選択",
"Select only one model to call": "",
"Select/Add Files": "",
@ -643,10 +643,10 @@
"Speech-to-Text Engine": "音声テキスト変換エンジン",
"Stop Sequence": "ストップシーケンス",
"Stream Chat Response": "",
"STT Model": "",
"STT Settings": "STT 設定",
"STT Model": "STTモデル",
"STT Settings": "STT設定",
"Submit": "送信",
"Subtitle (e.g. about the Roman Empire)": "タイトル (例: ロマ帝国)",
"Subtitle (e.g. about the Roman Empire)": "タイトル (例: ロマ帝国)",
"Success": "成功",
"Successfully updated.": "正常に更新されました。",
"Suggested": "提案",
@ -659,9 +659,9 @@
"Tap to interrupt": "",
"Tavily API Key": "",
"Tell us more:": "もっと話してください:",
"Temperature": "生成時予測幅(Tenperature)",
"Temperature": "温度",
"Template": "テンプレート",
"Temporary Chat": "",
"Temporary Chat": "一時的なチャット",
"Text Completion": "テキスト補完",
"Text-to-Speech Engine": "テキスト音声変換エンジン",
"Tfs Z": "Tfs Z",
@ -672,9 +672,9 @@
"The score should be a value between 0.0 (0%) and 1.0 (100%).": "スコアは0.0(0%)から1.0(100%)の間の値にしてください。",
"Theme": "テーマ",
"Thinking...": "思考中...",
"This action cannot be undone. Do you wish to continue?": "",
"This action cannot be undone. Do you wish to continue?": "このアクションは取り消し不可です。続けますか?",
"This ensures that your valuable conversations are securely saved to your backend database. Thank you!": "これは、貴重な会話がバックエンドデータベースに安全に保存されることを保証します。ありがとうございます!",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "開発中の機能であり正常動作しない場合があります。",
"This is an experimental feature, it may not function as expected and is subject to change at any time.": "実験的機能であり正常動作しない場合があります。",
"This option will delete all existing files in the collection and replace them with newly uploaded files.": "",
"This will delete": "",
"This will reset the knowledge base and sync all files. Do you wish to continue?": "",
@ -726,7 +726,7 @@
"Update for the latest features and improvements.": "",
"Update password": "パスワードを更新",
"Updated at": "",
"Upload": "",
"Upload": "アップロード",
"Upload a GGUF model": "GGUF モデルをアップロード",
"Upload directory": "アップロードディレクトリ",
"Upload files": "アップロードファイル",
@ -734,12 +734,12 @@
"Upload Pipeline": "アップロードパイプライン",
"Upload Progress": "アップロードの進行状況",
"URL Mode": "URL モード",
"Use '#' in the prompt input to load and include your knowledge.": "#を入力するとRAGデータを参照することが出来ます。",
"Use '#' in the prompt input to load and include your knowledge.": "#を入力すると知識データを参照することが出来ます。",
"Use '#' in the prompt input to load and select your documents.": "プロンプト入力で '#' を使用して、ドキュメントを読み込んで選択します。",
"Use Gravatar": "Gravatar を使用する",
"Use Initials": "初期値を使用する",
"use_mlock (Ollama)": "use_mlock(オラマ)",
"use_mmap (Ollama)": "use_mmap(オラマ)",
"use_mlock (Ollama)": "",
"use_mmap (Ollama)": "",
"user": "ユーザー",
"User location successfully retrieved.": "",
"User Permissions": "ユーザー権限",
@ -755,10 +755,10 @@
"Version {{selectedVersion}} of {{totalVersions}}": "",
"Voice": "ボイス",
"Warning": "警告",
"Warning:": "警告",
"Warning:": "警告:",
"Warning: If you update or change your embedding model, you will need to re-import all documents.": "警告: 埋め込みモデルを更新または変更した場合は、すべてのドキュメントを再インポートする必要があります。",
"Web": "ウェブ",
"Web API": "",
"Web API": "ウェブAPI",
"Web Loader Settings": "Web 読み込み設定",
"Web Search": "ウェブ検索",
"Web Search Engine": "ウェブ検索エンジン",
@ -767,7 +767,7 @@
"WebUI will make requests to": "WebUI は次に対してリクエストを行います",
"Whats New in": "新機能",
"Whisper (Local)": "",
"Widescreen Mode": "",
"Widescreen Mode": "ワイドスクリーンモード",
"Workspace": "ワークスペース",
"Write a prompt suggestion (e.g. Who are you?)": "プロンプトの提案を書いてください (例: あなたは誰ですか?)",
"Write a summary in 50 words that summarizes [topic or keyword].": "[トピックまたはキーワード] を要約する 50 語の概要を書いてください。",
@ -775,13 +775,13 @@
"You": "あなた",
"You can only chat with a maximum of {{maxCount}} file(s) at a time.": "",
"You can personalize your interactions with LLMs by adding memories through the 'Manage' button below, making them more helpful and tailored to you.": "",
"You cannot clone a base model": "基本モデルのクローンは作成できない",
"You cannot clone a base model": "基本モデルのクローンは作成できません",
"You have no archived conversations.": "これまでにアーカイブされた会話はありません。",
"You have shared this chat": "このチャットを共有しました",
"You're a helpful assistant.": "あなたは有能なアシスタントです。",
"You're now logged in.": "ログインしました。",
"Your account status is currently pending activation.": "貴方のアカウント状態は現在登録認証待ちです。",
"Your account status is currently pending activation.": "あなたのアカウント状態は現在登録認証待ちです。",
"Your entire contribution will go directly to the plugin developer; Open WebUI does not take any percentage. However, the chosen funding platform might have its own fees.": "",
"Youtube": "YouTube",
"Youtube Loader Settings": "Youtubeローダー設定(日本語はja)"
"Youtube Loader Settings": "YouTubeローダー設定(日本語はja)"
}

View File

@ -444,7 +444,7 @@ const convertOpenAIMessages = (convo) => {
};
const validateChat = (chat) => {
// Because ChatGPT sometimes has features we can't use like DALL-E or migh have corrupted messages, need to validate
// Because ChatGPT sometimes has features we can't use like DALL-E or might have corrupted messages, need to validate
const messages = chat.messages;
// Check if messages array is empty

View File

@ -33,7 +33,7 @@ function generateRegexRules(delimiters) {
const escapedRight = escapeRegex(right);
if (!display) {
// For inline delimiters, we match everyting
// For inline delimiters, we match everything
inlinePatterns.push(`${escapedLeft}((?:\\\\[^]|[^\\\\])+?)${escapedRight}`);
} else {
// Block delimiters doubles as inline delimiters when not followed by a newline