2024-01-07 07:40:51 +00:00
|
|
|
from fastapi import (
|
|
|
|
FastAPI,
|
|
|
|
Request,
|
|
|
|
Depends,
|
|
|
|
HTTPException,
|
|
|
|
status,
|
|
|
|
UploadFile,
|
|
|
|
File,
|
|
|
|
Form,
|
|
|
|
)
|
2024-01-07 06:07:20 +00:00
|
|
|
from fastapi.middleware.cors import CORSMiddleware
|
2024-01-07 09:40:36 +00:00
|
|
|
import os, shutil
|
2024-02-18 05:06:08 +00:00
|
|
|
|
|
|
|
from pathlib import Path
|
2024-02-01 21:35:41 +00:00
|
|
|
from typing import List
|
2024-01-07 06:07:20 +00:00
|
|
|
|
2024-01-07 16:28:35 +00:00
|
|
|
# from chromadb.utils import embedding_functions
|
2024-01-07 06:07:20 +00:00
|
|
|
|
2024-01-07 17:05:52 +00:00
|
|
|
from langchain_community.document_loaders import (
|
|
|
|
WebBaseLoader,
|
|
|
|
TextLoader,
|
|
|
|
PyPDFLoader,
|
|
|
|
CSVLoader,
|
2024-01-07 21:56:01 +00:00
|
|
|
Docx2txtLoader,
|
2024-01-13 13:46:56 +00:00
|
|
|
UnstructuredEPubLoader,
|
2024-01-09 23:24:53 +00:00
|
|
|
UnstructuredWordDocumentLoader,
|
|
|
|
UnstructuredMarkdownLoader,
|
feat: Add RAG support for various programming languages
Enables RAG for golang, python, java, sh, bat, powershell, cmd, js, css, c/c++/c#, sql, logs, ini, perl, r, dart, docker, env, php, haskell, lua, conf, plsql, ruby, db2, scala, bash, swift, vue, html, xml, and other arbitrary text files.
2024-01-17 07:09:47 +00:00
|
|
|
UnstructuredXMLLoader,
|
2024-01-19 17:48:04 +00:00
|
|
|
UnstructuredRSTLoader,
|
2024-01-23 21:03:22 +00:00
|
|
|
UnstructuredExcelLoader,
|
2024-01-07 17:05:52 +00:00
|
|
|
)
|
2024-01-07 06:59:22 +00:00
|
|
|
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
|
|
|
from langchain.chains import RetrievalQA
|
2024-02-18 05:06:08 +00:00
|
|
|
from langchain_community.vectorstores import Chroma
|
2024-01-07 06:59:22 +00:00
|
|
|
|
|
|
|
|
|
|
|
from pydantic import BaseModel
|
|
|
|
from typing import Optional
|
2024-02-18 05:06:08 +00:00
|
|
|
import mimetypes
|
2024-01-07 06:59:22 +00:00
|
|
|
import uuid
|
2024-02-18 05:06:08 +00:00
|
|
|
import json
|
2024-01-07 17:33:34 +00:00
|
|
|
import time
|
2024-01-07 06:59:22 +00:00
|
|
|
|
2024-02-18 05:06:08 +00:00
|
|
|
|
|
|
|
from apps.web.models.documents import (
|
|
|
|
Documents,
|
|
|
|
DocumentForm,
|
|
|
|
DocumentResponse,
|
|
|
|
)
|
|
|
|
|
|
|
|
from utils.misc import (
|
|
|
|
calculate_sha256,
|
|
|
|
calculate_sha256_string,
|
|
|
|
sanitize_filename,
|
|
|
|
extract_folders_after_data_docs,
|
|
|
|
)
|
2024-02-09 00:05:01 +00:00
|
|
|
from utils.utils import get_current_user, get_admin_user
|
2024-02-18 05:06:08 +00:00
|
|
|
from config import (
|
|
|
|
UPLOAD_DIR,
|
|
|
|
DOCS_DIR,
|
|
|
|
EMBED_MODEL,
|
|
|
|
CHROMA_CLIENT,
|
|
|
|
CHUNK_SIZE,
|
|
|
|
CHUNK_OVERLAP,
|
|
|
|
)
|
2024-01-07 06:59:22 +00:00
|
|
|
from constants import ERROR_MESSAGES
|
|
|
|
|
2024-01-07 16:28:35 +00:00
|
|
|
# EMBEDDING_FUNC = embedding_functions.SentenceTransformerEmbeddingFunction(
|
|
|
|
# model_name=EMBED_MODEL
|
|
|
|
# )
|
2024-01-07 06:07:20 +00:00
|
|
|
|
|
|
|
app = FastAPI()

# Allow cross-origin requests from any host; credentials stay enabled so
# browser clients can send auth cookies/headers to this sub-app.
origins = ["*"]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
|
|
|
|
|
|
|
|
2024-01-07 07:40:51 +00:00
|
|
|
class CollectionNameForm(BaseModel):
    # Target vector-DB collection; falls back to "test" when omitted.
    collection_name: Optional[str] = "test"
|
|
|
|
|
|
|
|
|
2024-01-07 07:40:51 +00:00
|
|
|
class StoreWebForm(CollectionNameForm):
    # URL of the web page to fetch and index.
    url: str
|
|
|
|
|
|
|
|
|
2024-01-07 09:40:36 +00:00
|
|
|
def store_data_in_vector_db(data, collection_name) -> bool:
    """Chunk the loaded documents and persist them into a Chroma collection.

    Returns True on success (including the case where the collection already
    exists), False on any other failure.
    """
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP
    )
    chunks = splitter.split_documents(data)

    documents = [chunk.page_content for chunk in chunks]
    metadatas = [chunk.metadata for chunk in chunks]

    try:
        collection = CHROMA_CLIENT.create_collection(name=collection_name)
        collection.add(
            documents=documents,
            metadatas=metadatas,
            ids=[str(uuid.uuid1()) for _ in documents],
        )
        return True
    except Exception as e:
        print(e)
        # A duplicate collection means this data was already stored, which
        # callers treat as success.
        return e.__class__.__name__ == "UniqueConstraintError"
|
2024-01-07 06:59:22 +00:00
|
|
|
|
|
|
|
|
2024-01-07 06:07:20 +00:00
|
|
|
@app.get("/")
async def get_status():
    """Liveness probe: always reports this sub-app as up."""
    return {"status": True}
|
2024-01-07 06:59:22 +00:00
|
|
|
|
|
|
|
|
2024-02-03 23:57:06 +00:00
|
|
|
class QueryDocForm(BaseModel):
    # Name of the single collection to search.
    collection_name: str
    # Free-text query to embed and match.
    query: str
    # Number of nearest results to return.
    k: Optional[int] = 4
|
|
|
|
|
|
|
|
|
2024-02-03 23:57:06 +00:00
|
|
|
@app.post("/query/doc")
def query_doc(
    form_data: QueryDocForm,
    user=Depends(get_current_user),
):
    """Run a similarity query against a single Chroma collection.

    Any failure (missing collection, query error) is surfaced as a 400.
    """
    try:
        collection = CHROMA_CLIENT.get_collection(name=form_data.collection_name)
        return collection.query(
            query_texts=[form_data.query], n_results=form_data.k
        )
    except Exception as e:
        print(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
|
2024-01-07 06:59:22 +00:00
|
|
|
|
|
|
|
|
2024-02-01 21:35:41 +00:00
|
|
|
class QueryCollectionsForm(BaseModel):
    # Names of the collections to search; missing ones are skipped.
    collection_names: List[str]
    # Free-text query to embed and match.
    query: str
    # Number of nearest results to return after merging.
    k: Optional[int] = 4
|
|
|
|
|
|
|
|
|
2024-02-03 22:44:49 +00:00
|
|
|
def merge_and_sort_query_results(query_results, k):
    """Merge Chroma query results from several collections into one.

    Each entry of `query_results` is a Chroma query response dict whose
    "ids"/"distances"/"metadatas"/"documents" values are wrapped in a
    single-element outer list. All hits are pooled, sorted ascending by
    distance (closest first), and truncated to the top `k`.

    Returns a dict in the same single-query Chroma response shape. An empty
    `query_results` (or one with no hits) yields empty result lists instead
    of raising — previously `zip(*combined)` blew up on an empty pool.
    """
    # Pool (distance, id, metadata, document) tuples from every result set.
    combined = []
    for data in query_results:
        combined.extend(
            zip(
                data["distances"][0],
                data["ids"][0],
                data["metadatas"][0],
                data["documents"][0],
            )
        )

    # Sort ascending by distance so the closest matches come first.
    combined.sort(key=lambda x: x[0])

    if combined:
        sorted_distances, sorted_ids, sorted_metadatas, sorted_documents = zip(
            *combined
        )
    else:
        # zip(*[]) would raise "not enough values to unpack"; fall back to
        # empty tuples so callers get a well-formed (empty) response.
        sorted_distances, sorted_ids, sorted_metadatas, sorted_documents = (
            (),
            (),
            (),
            (),
        )

    # Re-wrap in the single-query Chroma response shape, keeping only k hits.
    return {
        "ids": [list(sorted_ids)[:k]],
        "distances": [list(sorted_distances)[:k]],
        "metadatas": [list(sorted_metadatas)[:k]],
        "documents": [list(sorted_documents)[:k]],
        "embeddings": None,
        "uris": None,
        "data": None,
    }
|
|
|
|
|
|
|
|
|
2024-02-03 23:57:06 +00:00
|
|
|
@app.post("/query/collection")
def query_collection(
    form_data: QueryCollectionsForm,
    user=Depends(get_current_user),
):
    """Query several collections and return the merged top-k results.

    Collections that are missing or fail to query are skipped (best-effort),
    but the failure is now logged instead of silently swallowed — the old
    bare `except:` also caught KeyboardInterrupt/SystemExit.
    """
    results = []

    for collection_name in form_data.collection_names:
        try:
            collection = CHROMA_CLIENT.get_collection(
                name=collection_name,
            )
            result = collection.query(
                query_texts=[form_data.query], n_results=form_data.k
            )
            results.append(result)
        except Exception as e:
            # Skip this collection but keep querying the rest.
            print(e)

    return merge_and_sort_query_results(results, form_data.k)
|
2024-02-01 21:35:41 +00:00
|
|
|
|
|
|
|
|
2024-01-07 06:59:22 +00:00
|
|
|
@app.post("/web")
def store_web(form_data: StoreWebForm, user=Depends(get_current_user)):
    """Fetch a web page and store its content in the vector DB."""
    # "https://www.gutenberg.org/files/1727/1727-h/1727-h.htm"
    try:
        data = WebBaseLoader(form_data.url).load()

        collection_name = form_data.collection_name
        if collection_name == "":
            # No explicit name: derive one from the URL hash (Chroma limits
            # collection names to 63 characters).
            collection_name = calculate_sha256_string(form_data.url)[:63]

        store_data_in_vector_db(data, collection_name)

        return {
            "status": True,
            "collection_name": collection_name,
            "filename": form_data.url,
        }
    except Exception as e:
        print(e)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=ERROR_MESSAGES.DEFAULT(e),
        )
|
|
|
|
|
|
|
|
|
2024-02-18 05:06:08 +00:00
|
|
|
def get_loader(filename: str, file_content_type: str, file_path: str):
    """Pick the appropriate langchain document loader for a file.

    Selection is by file extension first, then by MIME type. Returns
    (loader, known_type) where known_type is False when nothing matched and
    we fell back to treating the file as plain text.
    """
    file_ext = filename.split(".")[-1].lower()
    known_type = True

    # `file_content_type` may be None (e.g. mimetypes.guess_type could not
    # identify the file); normalize it so the substring/equality checks
    # below never raise AttributeError.
    content_type = file_content_type or ""

    # Source-code / plain-text extensions loaded verbatim via TextLoader.
    known_source_ext = [
        "go",
        "py",
        "java",
        "sh",
        "bat",
        "ps1",
        "cmd",
        "js",
        "ts",
        "css",
        "cpp",
        "hpp",
        "h",
        "c",
        "cs",
        "sql",
        "log",
        "ini",
        "pl",
        "pm",
        "r",
        "dart",
        "dockerfile",
        "env",
        "php",
        "hs",
        "hsc",
        "lua",
        "nginxconf",
        "conf",
        "m",
        "mm",
        "plsql",
        "perl",
        "rb",
        "rs",
        "db2",
        "scala",
        "bash",
        "swift",
        "vue",
        "svelte",
    ]

    if file_ext == "pdf":
        loader = PyPDFLoader(file_path)
    elif file_ext == "csv":
        loader = CSVLoader(file_path)
    elif file_ext == "rst":
        loader = UnstructuredRSTLoader(file_path, mode="elements")
    elif file_ext == "xml":
        loader = UnstructuredXMLLoader(file_path)
    elif file_ext == "md":
        loader = UnstructuredMarkdownLoader(file_path)
    elif content_type == "application/epub+zip":
        loader = UnstructuredEPubLoader(file_path)
    elif (
        content_type
        == "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        or file_ext in ["doc", "docx"]
    ):
        loader = Docx2txtLoader(file_path)
    elif content_type in [
        "application/vnd.ms-excel",
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    ] or file_ext in ["xls", "xlsx"]:
        loader = UnstructuredExcelLoader(file_path)
    elif file_ext in known_source_ext or "text/" in content_type:
        loader = TextLoader(file_path)
    else:
        # Unknown type: best-effort plain-text load, flagged for the caller.
        loader = TextLoader(file_path)
        known_type = False

    return loader, known_type
|
|
|
|
|
|
|
|
|
2024-01-07 06:59:22 +00:00
|
|
|
@app.post("/doc")
def store_doc(
    collection_name: Optional[str] = Form(None),
    file: UploadFile = File(...),
    user=Depends(get_current_user),
):
    """Upload a document, save it under UPLOAD_DIR, parse it with the
    matching loader, and store its chunks in the vector DB."""
    # "https://www.gutenberg.org/files/1727/1727-h/1727-h.htm"

    print(file.content_type)
    try:
        filename = file.filename
        # FIX: the destination must be built from the uploaded filename —
        # the previous literal path made every upload overwrite one file.
        # NOTE(review): filename comes straight from the client; consider
        # sanitizing it to prevent path traversal outside UPLOAD_DIR.
        file_path = f"{UPLOAD_DIR}/{filename}"
        contents = file.file.read()
        with open(file_path, "wb") as f:
            f.write(contents)

        if collection_name is None:
            # Default the collection name to the file's content hash so
            # re-uploads of the same file land in the same collection
            # (truncated to Chroma's 63-char collection-name limit).
            with open(file_path, "rb") as f:
                collection_name = calculate_sha256(f)[:63]

        loader, known_type = get_loader(file.filename, file.content_type, file_path)
        data = loader.load()
        result = store_data_in_vector_db(data, collection_name)

        if result:
            return {
                "status": True,
                "collection_name": collection_name,
                "filename": filename,
                "known_type": known_type,
            }
        else:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail=ERROR_MESSAGES.DEFAULT(),
            )
    except Exception as e:
        print(e)
        if "No pandoc was found" in str(e):
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ERROR_MESSAGES.PANDOC_NOT_INSTALLED,
            )
        else:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail=ERROR_MESSAGES.DEFAULT(e),
            )
|
2024-01-07 06:59:22 +00:00
|
|
|
|
|
|
|
|
2024-02-18 05:06:08 +00:00
|
|
|
@app.get("/scan")
def scan_docs_dir(user=Depends(get_admin_user)):
    """Walk DOCS_DIR, index every non-hidden file into the vector DB, and
    register previously-unseen files in the documents table.

    Always returns True; individual file failures are logged and skipped.
    """
    for path in Path(DOCS_DIR).rglob("./**/*"):
        # Per-file try so one bad file no longer aborts the whole scan
        # (previously the try wrapped the entire loop).
        try:
            if path.is_file() and not path.name.startswith("."):
                tags = extract_folders_after_data_docs(path)
                filename = path.name
                # FIX: guess_type returns a (type, encoding) tuple;
                # get_loader expects only the MIME type string (may be None).
                file_content_type = mimetypes.guess_type(path)[0]

                with open(path, "rb") as f:
                    collection_name = calculate_sha256(f)[:63]

                loader, known_type = get_loader(
                    filename, file_content_type, str(path)
                )
                data = loader.load()

                result = store_data_in_vector_db(data, collection_name)

                if result:
                    sanitized_filename = sanitize_filename(filename)
                    doc = Documents.get_doc_by_name(sanitized_filename)

                    if doc is None:
                        doc = Documents.insert_new_doc(
                            user.id,
                            DocumentForm(
                                **{
                                    "name": sanitized_filename,
                                    "title": filename,
                                    "collection_name": collection_name,
                                    "filename": filename,
                                    # Folder names under data/docs become tags.
                                    "content": (
                                        json.dumps(
                                            {
                                                "tags": [
                                                    {"name": name} for name in tags
                                                ]
                                            }
                                        )
                                        if len(tags)
                                        else "{}"
                                    ),
                                }
                            ),
                        )
        except Exception as e:
            print(e)

    return True
|
|
|
|
|
|
|
|
|
2024-01-07 09:40:36 +00:00
|
|
|
@app.get("/reset/db")
def reset_vector_db(user=Depends(get_admin_user)):
    # Admin-only: wipe all vector-DB collections via the Chroma client reset.
    CHROMA_CLIENT.reset()
|
2024-01-07 09:40:36 +00:00
|
|
|
|
|
|
|
|
|
|
|
@app.get("/reset")
def reset(user=Depends(get_admin_user)) -> bool:
    """Delete every entry under UPLOAD_DIR and reset the vector DB.

    Deletion failures are logged per entry and do not stop the sweep.
    """
    upload_root = f"{UPLOAD_DIR}"
    for entry in os.listdir(upload_root):
        entry_path = os.path.join(upload_root, entry)
        try:
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as e:
            print("Failed to delete %s. Reason: %s" % (entry_path, e))

    try:
        CHROMA_CLIENT.reset()
    except Exception as e:
        print(e)

    return True
|