mirror of https://github.com/open-webui/pipelines (synced 2025-05-11 08:01:08 +00:00)
Testing updated Dockerfile
This commit is contained in:
parent cdb2367493
commit b983e8f257
Dockerfile (14 changed lines)
Dockerfile
@@ -18,25 +18,17 @@ RUN apt-get update && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/*
 
-# Install Rust
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-
-# Set up the Rust environment
-ENV PATH="/root/.cargo/bin:${PATH}"
-RUN rustup default stable
-
 WORKDIR /app
 
 # Install Python dependencies
 COPY ./requirements.txt .
 RUN pip3 install uv && \
     if [ "$USE_CUDA" = "true" ]; then \
-    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
-    uv pip install --system -r requirements.txt --no-cache-dir; \
+    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir; \
     else \
-    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
-    uv pip install --system -r requirements.txt --no-cache-dir; \
+    pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir; \
     fi
 
+RUN uv pip install --system -r requirements.txt --no-cache-dir
 
 # Copy the application code
 COPY . .
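This hunk drops the Rust toolchain and runs the uv pip install step once, after the if/else, instead of duplicating it in both branches. The toolchain was only needed to compile dd-trace-py from its git source; the requirements.txt hunk below switches to the prebuilt ddtrace wheel from PyPI, so nothing in the image needs Rust anymore. A minimal build sketch follows — it assumes USE_CUDA and USE_CUDA_DOCKER_VER are declared as build args in the part of the Dockerfile this hunk does not show, and that cu121 is one of the available PyTorch wheel indexes:

# CPU-only image (takes the else branch above)
docker build -t pipelines .

# CUDA image — passing USE_CUDA/USE_CUDA_DOCKER_VER as build args and the
# cu121 index name are assumptions, not shown in this commit
docker build --build-arg USE_CUDA=true --build-arg USE_CUDA_DOCKER_VER=cu121 -t pipelines .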
@@ -5,5 +5,5 @@
 # docker volume rm open-webui
 
 # Runs the containers with Ollama image for Open WebUI and the Pipelines endpoint in place
-docker run -d -p 9099:9099 --add-host=host.docker.internal:host-gateway -v pipelines:/app/pipelines --name pipelines --restart always --env-file .env ghcr.io/open-webui/pipelines:latest
+docker run -d -p 9099:9099 --add-host=host.docker.internal:host-gateway -v pipelines:/app/pipelines --name pipelines --restart always --env-file .env pipelines #ghcr.io/open-webui/pipelines:latest
 docker run -d -p 3000:8080 -v ~/.ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always -e OPENAI_API_BASE_URL=http://host.docker.internal:9099 -e OPENAI_API_KEY=0p3n-w3bu! ghcr.io/open-webui/open-webui:ollama
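The only change in this hunk (apparently a helper run script) is the image reference: the container now starts from a locally built image tagged pipelines, with the published ghcr.io/open-webui/pipelines:latest tag kept as a trailing comment — which fits a commit that is testing an updated Dockerfile. A sketch of the intended workflow, assuming the script is invoked from the repository root:

# build the updated Dockerfile under the tag the run line now expects
docker build -t pipelines .
# then launch both containers with the script above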
@@ -5,7 +5,7 @@ date: 2024-06-06
 version: 1.0
 license: MIT
 description: A filter pipeline that sends traces to DataDog.
-requirements: git+https://github.com/DataDog/dd-trace-py.git@main
+requirements: ddtrace
 environment_variables: DD_LLMOBS_AGENTLESS_ENABLED, DD_LLMOBS_ENABLED, DD_LLMOBS_APP_NAME, DD_API_KEY, DD_SITE
 """
 
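The filter's frontmatter now installs ddtrace from PyPI instead of building dd-trace-py from the main branch, matching the requirements.txt change below. The environment_variables line lists what the filter reads at startup; a hypothetical .env sketch that the run script's --env-file .env flag would supply — the names come from the docstring above, the values are placeholders:

# hypothetical .env — values are placeholders, not part of this commit
DD_LLMOBS_ENABLED=1
DD_LLMOBS_AGENTLESS_ENABLED=1
DD_LLMOBS_APP_NAME=pipelines
DD_API_KEY=<your-datadog-api-key>
DD_SITE=datadoghq.com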
@@ -104,9 +104,6 @@ class Pipeline:
             input_data = get_last_user_message(body["messages"]),
         )
 
-        print("SPAN: ")
-        print(self.llm_span)
-
         return body
 
 
@@ -114,9 +111,7 @@ class Pipeline:
         print(f"outlet:{__name__}")
         if body["chat_id"] not in self.chat_generations:
             return body
-        print("SELF LLM SPAN")
-        print(self.llm_span)
-        #self.set_dd()
+
         self.LLMObs.annotate(
             span = self.llm_span,
             output_data = get_last_assistant_message(body["messages"]),
@@ -29,7 +29,8 @@ chromadb
 
 # Observability
 langfuse
-git+https://github.com/DataDog/dd-trace-py.git@main
+#git+https://github.com/DataDog/dd-trace-py.git@main
+ddtrace
 
 # ML libraries
 torch
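Here the git dependency is commented out rather than deleted, and the released ddtrace wheel takes its place. Because PyPI ships prebuilt wheels, nothing compiles native extensions at install time — which is what let the Dockerfile hunk above drop the Rust toolchain. The equivalent one-off install, for reference:

pip3 install ddtrace --no-cache-dir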