Mirror of https://github.com/open-webui/open-webui (synced 2025-06-26 18:26:48 +00:00)
Merge branch 'dev' into searxng
backend/apps/ollama/main.py

@@ -46,6 +46,7 @@ from config import (
     SRC_LOG_LEVELS,
     OLLAMA_BASE_URLS,
     ENABLE_OLLAMA_API,
+    AIOHTTP_CLIENT_TIMEOUT,
     ENABLE_MODEL_FILTER,
     MODEL_FILTER_LIST,
     UPLOAD_DIR,
@@ -154,7 +155,9 @@ async def cleanup_response(

 async def post_streaming_url(url: str, payload: str):
     r = None
     try:
-        session = aiohttp.ClientSession(trust_env=True)
+        session = aiohttp.ClientSession(
+            trust_env=True, timeout=aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT)
+        )
         r = await session.post(url, data=payload)
         r.raise_for_status()
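The change above gives every proxied Ollama request a hard upper bound instead of waiting on the upstream forever. A minimal sketch of the same pattern, with a hypothetical URL; note the real post_streaming_url keeps the session open to stream the body, while this simplified variant closes it, which is fine for non-streaming calls:

    import asyncio

    import aiohttp

    AIOHTTP_CLIENT_TIMEOUT = 300  # seconds; matches the default added in config.py below


    async def post_with_timeout(url: str, payload: str) -> str:
        # total= caps the whole request lifecycle: connect, send, and read.
        timeout = aiohttp.ClientTimeout(total=AIOHTTP_CLIENT_TIMEOUT)
        # trust_env=True honours HTTP(S)_PROXY variables, as in the original code.
        async with aiohttp.ClientSession(trust_env=True, timeout=timeout) as session:
            async with session.post(url, data=payload) as r:
                r.raise_for_status()
                return await r.text()

    # Hypothetical local endpoint:
    # asyncio.run(post_with_timeout("http://localhost:11434/api/tags", "{}"))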
@@ -751,6 +754,14 @@ async def generate_chat_completion(
         if model_info.params.get("num_ctx", None):
             payload["options"]["num_ctx"] = model_info.params.get("num_ctx", None)

+        if model_info.params.get("num_batch", None):
+            payload["options"]["num_batch"] = model_info.params.get(
+                "num_batch", None
+            )
+
+        if model_info.params.get("num_keep", None):
+            payload["options"]["num_keep"] = model_info.params.get("num_keep", None)
+
         if model_info.params.get("repeat_last_n", None):
             payload["options"]["repeat_last_n"] = model_info.params.get(
                 "repeat_last_n", None
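This hunk extends a growing chain of near-identical if-blocks that copy per-model parameters into Ollama's options dict. A hypothetical table-driven refactor (not part of the commit) would collapse them, and makes a subtlety explicit: the truthiness check means a parameter explicitly set to 0 is silently skipped.

    # Sketch only; key list inferred from this hunk and its context lines.
    OLLAMA_OPTION_KEYS = ("num_ctx", "num_batch", "num_keep", "repeat_last_n")

    def apply_model_options(payload: dict, params: dict) -> dict:
        options = payload.setdefault("options", {})
        for key in OLLAMA_OPTION_KEYS:
            value = params.get(key, None)
            if value:  # mirrors the commit's check, so 0 and None are both skipped
                options[key] = value
        return payload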
backend/apps/rag/main.py

@@ -73,6 +73,7 @@ from apps.rag.search.serper import search_serper
 from apps.rag.search.serpstack import search_serpstack
 from apps.rag.search.serply import search_serply
 from apps.rag.search.duckduckgo import search_duckduckgo
+from apps.rag.search.tavily import search_tavily

 from utils.misc import (
     calculate_sha256,
@@ -120,6 +121,7 @@ from config import (
     SERPSTACK_HTTPS,
     SERPER_API_KEY,
     SERPLY_API_KEY,
+    TAVILY_API_KEY,
     RAG_WEB_SEARCH_RESULT_COUNT,
     RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
     RAG_EMBEDDING_OPENAI_BATCH_SIZE,
@@ -174,6 +176,7 @@ app.state.config.SERPSTACK_API_KEY = SERPSTACK_API_KEY
 app.state.config.SERPSTACK_HTTPS = SERPSTACK_HTTPS
 app.state.config.SERPER_API_KEY = SERPER_API_KEY
 app.state.config.SERPLY_API_KEY = SERPLY_API_KEY
+app.state.config.TAVILY_API_KEY = TAVILY_API_KEY
 app.state.config.RAG_WEB_SEARCH_RESULT_COUNT = RAG_WEB_SEARCH_RESULT_COUNT
 app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = RAG_WEB_SEARCH_CONCURRENT_REQUESTS
@@ -402,6 +405,7 @@ async def get_rag_config(user=Depends(get_admin_user)):
         "serpstack_https": app.state.config.SERPSTACK_HTTPS,
         "serper_api_key": app.state.config.SERPER_API_KEY,
         "serply_api_key": app.state.config.SERPLY_API_KEY,
+        "tavily_api_key": app.state.config.TAVILY_API_KEY,
         "result_count": app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
         "concurrent_requests": app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
     },
@@ -430,6 +434,7 @@ class WebSearchConfig(BaseModel):
     serpstack_https: Optional[bool] = None
     serper_api_key: Optional[str] = None
     serply_api_key: Optional[str] = None
+    tavily_api_key: Optional[str] = None
     result_count: Optional[int] = None
     concurrent_requests: Optional[int] = None
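With tavily_api_key on WebSearchConfig, an admin config update can now carry a Tavily key. A hedged sketch of such a call; the mount path, auth header, and the surrounding ConfigUpdateForm shape are assumptions, and fields not shown in this diff are omitted:

    import requests

    body = {
        "web": {
            "search": {
                "tavily_api_key": "tvly-XXXXXXXX",  # placeholder key
                "result_count": 3,
                "concurrent_requests": 10,
            }
        }
    }
    requests.post(
        "http://localhost:8080/rag/api/v1/config/update",  # assumed mount path
        json=body,
        headers={"Authorization": "Bearer <admin-token>"},
    )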
@@ -481,6 +486,7 @@ async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_
     app.state.config.SERPSTACK_HTTPS = form_data.web.search.serpstack_https
     app.state.config.SERPER_API_KEY = form_data.web.search.serper_api_key
     app.state.config.SERPLY_API_KEY = form_data.web.search.serply_api_key
+    app.state.config.TAVILY_API_KEY = form_data.web.search.tavily_api_key
     app.state.config.RAG_WEB_SEARCH_RESULT_COUNT = form_data.web.search.result_count
     app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS = (
         form_data.web.search.concurrent_requests
@@ -510,6 +516,7 @@ async def update_rag_config(form_data: ConfigUpdateForm, user=Depends(get_admin_
         "serpstack_https": app.state.config.SERPSTACK_HTTPS,
         "serper_api_key": app.state.config.SERPER_API_KEY,
         "serply_api_key": app.state.config.SERPLY_API_KEY,
+        "tavily_api_key": app.state.config.TAVILY_API_KEY,
         "result_count": app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
         "concurrent_requests": app.state.config.RAG_WEB_SEARCH_CONCURRENT_REQUESTS,
     },
@@ -758,7 +765,7 @@ def search_web(engine: str, query: str) -> list[SearchResult]:
     - SERPSTACK_API_KEY
     - SERPER_API_KEY
     - SERPLY_API_KEY
-
+    - TAVILY_API_KEY
     Args:
         query (str): The query to search for
     """
@@ -833,6 +840,15 @@ def search_web(engine: str, query: str) -> list[SearchResult]:
             raise Exception("No SERPLY_API_KEY found in environment variables")
     elif engine == "duckduckgo":
         return search_duckduckgo(query, app.state.config.RAG_WEB_SEARCH_RESULT_COUNT, app.state.config.RAG_WEB_SEARCH_WHITE_LIST_DOMAINS)
+    elif engine == "tavily":
+        if app.state.config.TAVILY_API_KEY:
+            return search_tavily(
+                app.state.config.TAVILY_API_KEY,
+                query,
+                app.state.config.RAG_WEB_SEARCH_RESULT_COUNT,
+            )
+        else:
+            raise Exception("No TAVILY_API_KEY found in environment variables")
     else:
         raise Exception("No search engine API key found in environment variables")
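With the new elif branch, search_web dispatches engine == "tavily" like the other providers: key present, call through; key missing, raise. A small sketch of a call site (query text illustrative):

    # Sketch: exercising the dispatcher above once TAVILY_API_KEY is configured.
    results = search_web("tavily", "retrieval augmented generation evaluation")
    for result in results:
        print(result.link, "-", result.title)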
backend/apps/rag/search/tavily.py (new file, 39 lines)
@@ -0,0 +1,39 @@
+import logging
+
+import requests
+
+from apps.rag.search.main import SearchResult
+from config import SRC_LOG_LEVELS
+
+log = logging.getLogger(__name__)
+log.setLevel(SRC_LOG_LEVELS["RAG"])
+
+
+def search_tavily(api_key: str, query: str, count: int) -> list[SearchResult]:
+    """Search using Tavily's Search API and return the results as a list of SearchResult objects.
+
+    Args:
+        api_key (str): A Tavily Search API key
+        query (str): The query to search for
+
+    Returns:
+        List[SearchResult]: A list of search results
+    """
+    url = "https://api.tavily.com/search"
+    data = {"query": query, "api_key": api_key}
+
+    response = requests.post(url, json=data)
+    response.raise_for_status()
+
+    json_response = response.json()
+
+    raw_search_results = json_response.get("results", [])
+
+    return [
+        SearchResult(
+            link=result["url"],
+            title=result.get("title", ""),
+            snippet=result.get("content"),
+        )
+        for result in raw_search_results[:count]
+    ]
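A quick manual exercise of the new module (placeholder key via the environment; assumes the backend package is importable). One observation: requests.post is called without a timeout, so a stalled Tavily endpoint would block the worker; the aiohttp change earlier in this commit fixes the same class of problem on the Ollama path.

    import os

    from apps.rag.search.tavily import search_tavily

    results = search_tavily(
        api_key=os.environ["TAVILY_API_KEY"],  # export your own key first
        query="open source self-hosted LLM web UI",
        count=3,
    )
    for r in results:
        print(r.title, r.link, (r.snippet or "")[:80])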
backend/apps/web/models/memories.py

@@ -65,6 +65,20 @@ class MemoriesTable:
         else:
             return None

+    def update_memory_by_id(
+        self,
+        id: str,
+        content: str,
+    ) -> Optional[MemoryModel]:
+        try:
+            memory = Memory.get(Memory.id == id)
+            memory.content = content
+            memory.updated_at = int(time.time())
+            memory.save()
+            return MemoryModel(**model_to_dict(memory))
+        except:
+            return None
+
     def get_memories(self) -> List[MemoryModel]:
         try:
             memories = Memory.select()
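One reviewer-level nit on the new method: the bare except: swallows everything, including programming errors, and makes a failed save indistinguishable from a missing row. A narrower sketch using the same Peewee calls (suggestion only, not what the commit does):

    def update_memory_by_id(self, id: str, content: str) -> Optional[MemoryModel]:
        try:
            memory = Memory.get(Memory.id == id)
        except Memory.DoesNotExist:  # Peewee's miss signal for .get()
            return None  # the router maps None to a 404
        memory.content = content
        memory.updated_at = int(time.time())
        memory.save()
        return MemoryModel(**model_to_dict(memory))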
backend/apps/web/routers/memories.py

@@ -44,6 +44,10 @@ class AddMemoryForm(BaseModel):
     content: str


+class MemoryUpdateModel(BaseModel):
+    content: Optional[str] = None
+
+
 @router.post("/add", response_model=Optional[MemoryModel])
 async def add_memory(
     request: Request, form_data: AddMemoryForm, user=Depends(get_verified_user)
@@ -62,6 +66,34 @@ async def add_memory(
     return memory


+@router.post("/{memory_id}/update", response_model=Optional[MemoryModel])
+async def update_memory_by_id(
+    memory_id: str,
+    request: Request,
+    form_data: MemoryUpdateModel,
+    user=Depends(get_verified_user),
+):
+    memory = Memories.update_memory_by_id(memory_id, form_data.content)
+    if memory is None:
+        raise HTTPException(status_code=404, detail="Memory not found")
+
+    if form_data.content is not None:
+        memory_embedding = request.app.state.EMBEDDING_FUNCTION(form_data.content)
+        collection = CHROMA_CLIENT.get_or_create_collection(
+            name=f"user-memory-{user.id}"
+        )
+        collection.upsert(
+            documents=[form_data.content],
+            ids=[memory.id],
+            embeddings=[memory_embedding],
+            metadatas=[
+                {"created_at": memory.created_at, "updated_at": memory.updated_at}
+            ],
+        )
+
+    return memory
+
+
 ############################
 # QueryMemory
 ############################
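The endpoint updates the SQL row first, then upserts the re-embedded text into the per-user Chroma collection so vector search stays consistent with the stored memory. Exercising it from a client (base URL and route prefix assumed; token placeholder):

    import requests

    resp = requests.post(
        "http://localhost:8080/api/v1/memories/<memory-id>/update",  # assumed prefix
        json={"content": "Prefers answers with code examples."},
        headers={"Authorization": "Bearer <user-token>"},
    )
    resp.raise_for_status()
    print(resp.json())  # the updated memory record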
backend/config.py

@@ -425,6 +425,7 @@ OLLAMA_API_BASE_URL = os.environ.get(
 )

 OLLAMA_BASE_URL = os.environ.get("OLLAMA_BASE_URL", "")
+AIOHTTP_CLIENT_TIMEOUT = int(os.environ.get("AIOHTTP_CLIENT_TIMEOUT", "300"))
 K8S_FLAG = os.environ.get("K8S_FLAG", "")
 USE_OLLAMA_DOCKER = os.environ.get("USE_OLLAMA_DOCKER", "false")
@@ -951,6 +952,11 @@ SERPLY_API_KEY = PersistentConfig(
     os.getenv("SERPLY_API_KEY", ""),
 )

+TAVILY_API_KEY = PersistentConfig(
+    "TAVILY_API_KEY",
+    "rag.web.search.tavily_api_key",
+    os.getenv("TAVILY_API_KEY", ""),
+)

 RAG_WEB_SEARCH_RESULT_COUNT = PersistentConfig(
     "RAG_WEB_SEARCH_RESULT_COUNT",
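The two settings differ in kind: AIOHTTP_CLIENT_TIMEOUT is a plain environment read, fixed at process start, while TAVILY_API_KEY goes through PersistentConfig, which seeds from the environment but can be overridden at runtime and persisted. A much-simplified sketch of the idea (the real class in config.py also handles DB-backed persistence):

    import os

    class PersistentConfig:
        # Simplified sketch, not the real implementation.
        def __init__(self, env_name: str, config_path: str, env_value: str):
            self.env_name = env_name        # e.g. "TAVILY_API_KEY"
            self.config_path = config_path  # dotted path for persisted overrides
            self.value = env_value          # seed value; may later be replaced

    TAVILY_API_KEY = PersistentConfig(
        "TAVILY_API_KEY",
        "rag.web.search.tavily_api_key",
        os.getenv("TAVILY_API_KEY", ""),
    )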
backend/main.py

@@ -494,6 +494,9 @@ def filter_pipeline(payload, user):
     if "title" in payload:
         del payload["title"]

+    if "task" in payload:
+        del payload["task"]
+
     return payload
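The task flag added to the payloads below marks internal one-shot requests (title, search-query, and emoji generation); filter_pipeline strips it along with other bookkeeping keys before the payload is forwarded upstream. A toy illustration of that contract (the real function also runs pipeline filters):

    INTERNAL_KEYS = ("title", "task")  # bookkeeping keys stripped in this hunk

    def strip_internal_keys(payload: dict) -> dict:
        # Toy stand-in for the deletion logic in filter_pipeline above.
        return {k: v for k, v in payload.items() if k not in INTERNAL_KEYS}

    cleaned = strip_internal_keys({"model": "llama3", "task": True, "title": True})
    assert "task" not in cleaned and "title" not in cleaned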
@@ -835,6 +838,71 @@ async def generate_search_query(form_data: dict, user=Depends(get_verified_user)
         "messages": [{"role": "user", "content": content}],
         "stream": False,
         "max_tokens": 30,
+        "task": True,
     }

     print(payload)
+
+    try:
+        payload = filter_pipeline(payload, user)
+    except Exception as e:
+        return JSONResponse(
+            status_code=e.args[0],
+            content={"detail": e.args[1]},
+        )
+
+    if model["owned_by"] == "ollama":
+        return await generate_ollama_chat_completion(
+            OpenAIChatCompletionForm(**payload), user=user
+        )
+    else:
+        return await generate_openai_chat_completion(payload, user=user)
+
+
+@app.post("/api/task/emoji/completions")
+async def generate_emoji(form_data: dict, user=Depends(get_verified_user)):
+    print("generate_emoji")
+
+    model_id = form_data["model"]
+    if model_id not in app.state.MODELS:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail="Model not found",
+        )
+
+    # Check if the user has a custom task model
+    # If the user has a custom task model, use that model
+    if app.state.MODELS[model_id]["owned_by"] == "ollama":
+        if app.state.config.TASK_MODEL:
+            task_model_id = app.state.config.TASK_MODEL
+            if task_model_id in app.state.MODELS:
+                model_id = task_model_id
+    else:
+        if app.state.config.TASK_MODEL_EXTERNAL:
+            task_model_id = app.state.config.TASK_MODEL_EXTERNAL
+            if task_model_id in app.state.MODELS:
+                model_id = task_model_id
+
+    print(model_id)
+    model = app.state.MODELS[model_id]
+
+    template = '''
+Your task is to reflect the speaker's likely facial expression through a fitting emoji. Interpret emotions from the message and reflect their facial expression using fitting, diverse emojis (e.g., 😊, 😢, 😡, 😱).
+
+Message: """{{prompt}}"""
+'''
+
+    content = title_generation_template(
+        template, form_data["prompt"], user.model_dump()
+    )
+
+    payload = {
+        "model": model_id,
+        "messages": [{"role": "user", "content": content}],
+        "stream": False,
+        "max_tokens": 4,
+        "chat_id": form_data.get("chat_id", None),
+        "task": True,
+    }
+
+    print(payload)
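generate_search_query gains the task flag and a filter-then-dispatch tail, and the new /api/task/emoji/completions endpoint follows the same task-model pattern: resolve a dedicated task model if one is configured, build a one-shot prompt, and cap the reply at 4 tokens so it stays a single emoji. Reusing title_generation_template works because the template only substitutes {{prompt}}. The hunk is cut off after print(payload); the dispatch presumably mirrors generate_search_query. Calling the endpoint from a client (host, model id, and token are placeholders):

    import requests

    resp = requests.post(
        "http://localhost:8080/api/task/emoji/completions",
        json={"model": "llama3:latest", "prompt": "I finally fixed the bug!"},
        headers={"Authorization": "Bearer <user-token>"},
    )
    print(resp.json())  # expect a completion holding a single emoji, e.g. 😄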