Merge pull request #8594 from jayteaftw/main

feat: Support for instruct/prefixing embeddings
Timothy Jaeryang Baek · 2025-03-30 21:54:44 -07:00 · committed via GitHub · commit 433b5bddc1
3 changed files with 72 additions and 32 deletions

backend/open_webui/config.py

@@ -1783,6 +1783,18 @@ RAG_EMBEDDING_BATCH_SIZE = PersistentConfig(
     ),
 )
 
+RAG_EMBEDDING_QUERY_PREFIX = (
+    os.environ.get("RAG_EMBEDDING_QUERY_PREFIX", None)
+)
+
+RAG_EMBEDDING_PASSAGE_PREFIX = (
+    os.environ.get("RAG_EMBEDDING_PASSAGE_PREFIX", None)
+)
+
+RAG_EMBEDDING_PREFIX_FIELD_NAME = (
+    os.environ.get("RAG_EMBEDDING_PREFIX_FIELD_NAME", None)
+)
+
 RAG_RERANKING_MODEL = PersistentConfig(
     "RAG_RERANKING_MODEL",
     "rag.reranking_model",

backend/open_webui/retrieval/utils.py

@@ -18,11 +18,17 @@ from open_webui.models.files import Files
 from open_webui.retrieval.vector.main import GetResult
 
 from open_webui.env import (
     SRC_LOG_LEVELS,
     OFFLINE_MODE,
     ENABLE_FORWARD_USER_INFO_HEADERS,
 )
+from open_webui.config import (
+    RAG_EMBEDDING_QUERY_PREFIX,
+    RAG_EMBEDDING_PASSAGE_PREFIX,
+    RAG_EMBEDDING_PREFIX_FIELD_NAME,
+)
 
 log = logging.getLogger(__name__)
 log.setLevel(SRC_LOG_LEVELS["RAG"])
@@ -47,7 +53,7 @@ class VectorSearchRetriever(BaseRetriever):
     ) -> list[Document]:
         result = VECTOR_DB_CLIENT.search(
             collection_name=self.collection_name,
-            vectors=[self.embedding_function(query)],
+            vectors=[self.embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX)],
             limit=self.top_k,
         )
@@ -250,7 +256,7 @@ def query_collection(
 ) -> dict:
     results = []
     for query in queries:
-        query_embedding = embedding_function(query)
+        query_embedding = embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX)
         for collection_name in collection_names:
             if collection_name:
                 try:
@@ -328,33 +334,33 @@ def get_embedding_function(
     embedding_batch_size,
 ):
     if embedding_engine == "":
-        return lambda query, user=None: embedding_function.encode(query).tolist()
+        return lambda query, prefix, user=None: embedding_function.encode(
+            query, prompt=prefix if prefix else None
+        ).tolist()
     elif embedding_engine in ["ollama", "openai"]:
-        func = lambda query, user=None: generate_embeddings(
+        func = lambda query, prefix, user=None: generate_embeddings(
             engine=embedding_engine,
             model=embedding_model,
             text=query,
+            prefix=prefix,
             url=url,
             key=key,
             user=user,
         )
 
-        def generate_multiple(query, user, func):
+        def generate_multiple(query, prefix, user, func):
             if isinstance(query, list):
                 embeddings = []
                 for i in range(0, len(query), embedding_batch_size):
                     embeddings.extend(
-                        func(query[i : i + embedding_batch_size], user=user)
+                        func(query[i : i + embedding_batch_size], prefix=prefix, user=user)
                     )
                 return embeddings
             else:
-                return func(query, user)
+                return func(query, prefix, user)
 
-        return lambda query, user=None: generate_multiple(query, user, func)
+        return lambda query, prefix, user=None: generate_multiple(query, prefix, user, func)
     else:
         raise ValueError(f"Unknown embedding engine: {embedding_engine}")
 
 
 def get_sources_from_files(
     request,
     files,
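
To see how the new positional prefix threads through, here is a minimal sketch of calling the function returned by get_embedding_function for the local sentence-transformers engine (embedding_engine == ""). The model name is illustrative, and the keyword names for the parameters above embedding_batch_size are assumptions based on how the function is used elsewhere in this file; encode(..., prompt=...) is the sentence-transformers mechanism the lambda above relies on:

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("intfloat/e5-small-v2")  # illustrative model choice
embedding_function = get_embedding_function(
    embedding_engine="",
    embedding_model="intfloat/e5-small-v2",
    embedding_function=model,
    url=None,
    key=None,
    embedding_batch_size=32,
)

# Queries and passages now carry different prompts through to encode():
query_vec = embedding_function("what is RAG?", "query: ")
passage_vecs = embedding_function(["RAG retrieves supporting context."], "passage: ")
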
@@ -572,9 +578,17 @@ def generate_openai_batch_embeddings(
     texts: list[str],
     url: str = "https://api.openai.com/v1",
     key: str = "",
-    user: UserModel = None,
+    prefix: str = None,
+    user: UserModel = None,
 ) -> Optional[list[list[float]]]:
     try:
+        json_data = {
+            "input": texts,
+            "model": model,
+        }
+        if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME, str) and isinstance(prefix, str):
+            json_data[RAG_EMBEDDING_PREFIX_FIELD_NAME] = prefix
+
         r = requests.post(
             f"{url}/embeddings",
             headers={
@@ -591,7 +605,7 @@ def generate_openai_batch_embeddings(
                     else {}
                 ),
             },
-            json={"input": texts, "model": model},
+            json=json_data,
         )
         r.raise_for_status()
         data = r.json()
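
The effect on the wire: a sketch of the request body when a prefix field name is configured. "input_type" is an illustrative field name that some OpenAI-compatible embedding APIs accept; the PR does not fix any particular name:

# With RAG_EMBEDDING_PREFIX_FIELD_NAME="input_type" and prefix="query",
# the POST to {url}/embeddings carries:
json_data = {
    "input": ["what is retrieval augmented generation?"],
    "model": "text-embedding-3-small",  # placeholder model name
    "input_type": "query",
}
# With RAG_EMBEDDING_PREFIX_FIELD_NAME unset, json_data stays
# {"input": [...], "model": ...}; the prefix is instead concatenated onto
# the text itself in generate_embeddings() further down.
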
@@ -605,9 +619,21 @@
 def generate_ollama_batch_embeddings(
-    model: str, texts: list[str], url: str, key: str = "", user: UserModel = None
+    model: str,
+    texts: list[str],
+    url: str,
+    key: str = "",
+    prefix: str = None,
+    user: UserModel = None,
 ) -> Optional[list[list[float]]]:
     try:
+        json_data = {
+            "input": texts,
+            "model": model,
+        }
+        if isinstance(RAG_EMBEDDING_PREFIX_FIELD_NAME, str) and isinstance(prefix, str):
+            json_data[RAG_EMBEDDING_PREFIX_FIELD_NAME] = prefix
+
         r = requests.post(
             f"{url}/api/embed",
             headers={
@@ -624,7 +650,7 @@ def generate_ollama_batch_embeddings(
                     else {}
                 ),
             },
-            json={"input": texts, "model": model},
+            json=json_data,
         )
         r.raise_for_status()
         data = r.json()
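
Note that Ollama's native /api/embed schema does not define a prefix parameter, so unless the server behind the configured URL accepts a custom field, the concatenation fallback in generate_embeddings() below is the path that typically applies for the ollama engine.
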
@@ -638,33 +664,32 @@ def generate_ollama_batch_embeddings(
         return None
 
 
-def generate_embeddings(engine: str, model: str, text: Union[str, list[str]], **kwargs):
+def generate_embeddings(engine: str, model: str, text: Union[str, list[str]], prefix: Union[str, None] = None, **kwargs):
     url = kwargs.get("url", "")
     key = kwargs.get("key", "")
     user = kwargs.get("user")
 
+    if prefix is not None and RAG_EMBEDDING_PREFIX_FIELD_NAME is None:
+        if isinstance(text, list):
+            text = [f"{prefix}{text_element}" for text_element in text]
+        else:
+            text = f"{prefix}{text}"
+
     if engine == "ollama":
         if isinstance(text, list):
             embeddings = generate_ollama_batch_embeddings(
-                **{"model": model, "texts": text, "url": url, "key": key, "user": user}
+                **{"model": model, "texts": text, "url": url, "key": key, "prefix": prefix, "user": user}
             )
         else:
             embeddings = generate_ollama_batch_embeddings(
-                **{
-                    "model": model,
-                    "texts": [text],
-                    "url": url,
-                    "key": key,
-                    "user": user,
-                }
+                **{"model": model, "texts": [text], "url": url, "key": key, "prefix": prefix, "user": user}
             )
         return embeddings[0] if isinstance(text, str) else embeddings
     elif engine == "openai":
         if isinstance(text, list):
-            embeddings = generate_openai_batch_embeddings(model, text, url, key, user)
+            embeddings = generate_openai_batch_embeddings(model, text, url, key, prefix, user)
         else:
-            embeddings = generate_openai_batch_embeddings(model, [text], url, key, user)
+            embeddings = generate_openai_batch_embeddings(model, [text], url, key, prefix, user)
         return embeddings[0] if isinstance(text, str) else embeddings
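
The fallback above is plain string concatenation, applied only when no dedicated request field is configured. A sketch of its observable behavior (values illustrative):

prefix = "passage: "
texts = ["chunk one", "chunk two"]
texts = [f"{prefix}{text_element}" for text_element in texts]
assert texts == ["passage: chunk one", "passage: chunk two"]

prefix = "query: "
query = "what is RAG?"
assert f"{prefix}{query}" == "query: what is RAG?"
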
@@ -700,9 +725,10 @@ class RerankCompressor(BaseDocumentCompressor):
         else:
             from sentence_transformers import util
 
-            query_embedding = self.embedding_function(query)
+            query_embedding = self.embedding_function(query, RAG_EMBEDDING_QUERY_PREFIX)
             document_embedding = self.embedding_function(
-                [doc.page_content for doc in documents]
+                [doc.page_content for doc in documents],
+                RAG_EMBEDDING_PASSAGE_PREFIX,
             )
             scores = util.cos_sim(query_embedding, document_embedding)[0]
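
The fallback scoring above now embeds the query and the documents asymmetrically. An equivalent standalone sketch with sentence-transformers (model name and prefix strings illustrative):

from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("intfloat/e5-small-v2")  # illustrative
query_embedding = model.encode("how do instruct prefixes help?", prompt="query: ")
document_embedding = model.encode(
    ["Prefix-tuned models embed queries and passages differently."],
    prompt="passage: ",
)
scores = util.cos_sim(query_embedding, document_embedding)[0]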

backend/open_webui/routers/retrieval.py

@@ -74,7 +74,6 @@ from open_webui.utils.misc import (
 )
 from open_webui.utils.auth import get_admin_user, get_verified_user
 from open_webui.config import (
     ENV,
     RAG_EMBEDDING_MODEL_AUTO_UPDATE,
@@ -83,6 +82,8 @@ from open_webui.config import (
     RAG_RERANKING_MODEL_TRUST_REMOTE_CODE,
     UPLOAD_DIR,
     DEFAULT_LOCALE,
+    RAG_EMBEDDING_PASSAGE_PREFIX,
+    RAG_EMBEDDING_QUERY_PREFIX,
 )
 from open_webui.env import (
     SRC_LOG_LEVELS,
@@ -891,7 +892,7 @@ def save_docs_to_vector_db(
         )
 
         embeddings = embedding_function(
-            list(map(lambda x: x.replace("\n", " "), texts)), user=user
+            list(map(lambda x: x.replace("\n", " "), texts)), prefix=RAG_EMBEDDING_PASSAGE_PREFIX, user=user
         )
 
         items = [
@@ -1533,8 +1534,9 @@ def query_doc_handler(
         return query_doc(
             collection_name=form_data.collection_name,
             query_embedding=request.app.state.EMBEDDING_FUNCTION(
-                form_data.query, user=user
+                form_data.query, prefix=RAG_EMBEDDING_QUERY_PREFIX, user=user
             ),
             k=form_data.k if form_data.k else request.app.state.config.TOP_K,
             user=user,
         )
@@ -1661,7 +1663,7 @@ if ENV == "dev":
 
     @router.get("/ef/{text}")
     async def get_embeddings(request: Request, text: Optional[str] = "Hello World!"):
-        return {"result": request.app.state.EMBEDDING_FUNCTION(text)}
+        return {"result": request.app.state.EMBEDDING_FUNCTION(text, RAG_EMBEDDING_QUERY_PREFIX)}
class BatchProcessFilesForm(BaseModel):