Mirror of https://github.com/open-webui/pipelines, synced 2025-05-11 16:10:45 +00:00.
Merge pull request #134 from aschaber1/refactor/llamaindex
refactor(examples/llamaindex_ollama): add use of env vars for configuration
This commit is contained in:
commit
08e08ad040
@ -10,28 +10,48 @@ requirements: llama-index, llama-index-llms-ollama, llama-index-embeddings-ollam
|
|||||||
|
|
||||||
from typing import List, Union, Generator, Iterator
|
from typing import List, Union, Generator, Iterator
|
||||||
from schemas import OpenAIChatMessage
|
from schemas import OpenAIChatMessage
|
||||||
|
import os
|
||||||
|
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
|
||||||
class Pipeline:
    """Llama-index RAG pipeline backed by Ollama-served models.

    All connection/model settings come from environment variables (with
    local-development defaults) and are captured in the nested ``Valves``
    pydantic model so they can be inspected/edited as pipeline "valves".
    """

    class Valves(BaseModel):
        # Base URL of the Ollama server, e.g. "http://localhost:11434".
        LLAMAINDEX_OLLAMA_BASE_URL: str
        # Name of the chat/completion model served by Ollama.
        LLAMAINDEX_MODEL_NAME: str
        # Name of the embedding model served by Ollama.
        LLAMAINDEX_EMBEDDING_MODEL_NAME: str

    def __init__(self):
        # Document collection and vector index are built lazily in
        # on_startup(), once the llama-index packages are available.
        self.documents = None
        self.index = None

        # Pull configuration from the environment, falling back to
        # sensible local-development defaults.
        self.valves = self.Valves(
            **{
                "LLAMAINDEX_OLLAMA_BASE_URL": os.getenv(
                    "LLAMAINDEX_OLLAMA_BASE_URL", "http://localhost:11434"
                ),
                "LLAMAINDEX_MODEL_NAME": os.getenv(
                    "LLAMAINDEX_MODEL_NAME", "llama3"
                ),
                "LLAMAINDEX_EMBEDDING_MODEL_NAME": os.getenv(
                    "LLAMAINDEX_EMBEDDING_MODEL_NAME", "nomic-embed-text"
                ),
            }
        )

    async def on_startup(self):
        """Called once when the server starts.

        Configures llama-index's global ``Settings`` with the Ollama LLM
        and embedding model from the valves, then loads documents and
        builds the in-memory vector index.
        """
        # Imported lazily so the heavy llama-index packages are only
        # loaded when the pipeline actually starts.
        from llama_index.embeddings.ollama import OllamaEmbedding
        from llama_index.llms.ollama import Ollama
        from llama_index.core import Settings, VectorStoreIndex, SimpleDirectoryReader

        Settings.embed_model = OllamaEmbedding(
            model_name=self.valves.LLAMAINDEX_EMBEDDING_MODEL_NAME,
            base_url=self.valves.LLAMAINDEX_OLLAMA_BASE_URL,
        )
        Settings.llm = Ollama(
            model=self.valves.LLAMAINDEX_MODEL_NAME,
            base_url=self.valves.LLAMAINDEX_OLLAMA_BASE_URL,
        )

        # NOTE(review): the dead `global documents, index` statement was
        # removed — all state lives on `self`, and nothing ever assigned
        # to the module-level names.
        # NOTE(review): path assumes the open-webui container layout —
        # confirm "/app/backend/data" exists in the deployment.
        self.documents = SimpleDirectoryReader("/app/backend/data").load_data()
        self.index = VectorStoreIndex.from_documents(self.documents)
Loading…
Reference in New Issue
Block a user