Testing updated Dockerfile

This commit is contained in:
0xThresh.eth 2024-06-16 11:01:41 -07:00
parent cdb2367493
commit b983e8f257
4 changed files with 8 additions and 20 deletions

View File

@@ -18,25 +18,17 @@ RUN apt-get update && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
# Install Rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
# Set up the Rust environment
ENV PATH="/root/.cargo/bin:${PATH}"
RUN rustup default stable
WORKDIR /app
# Install Python dependencies
COPY ./requirements.txt .
RUN pip3 install uv && \
if [ "$USE_CUDA" = "true" ]; then \
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir && \
uv pip install --system -r requirements.txt --no-cache-dir; \
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/$USE_CUDA_DOCKER_VER --no-cache-dir; \
else \
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir && \
uv pip install --system -r requirements.txt --no-cache-dir; \
pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu --no-cache-dir; \
fi
RUN uv pip install --system -r requirements.txt --no-cache-dir
# Copy the application code
COPY . .

View File

@@ -5,5 +5,5 @@
# docker volume rm open-webui
# Runs the containers with Ollama image for Open WebUI and the Pipelines endpoint in place
docker run -d -p 9099:9099 --add-host=host.docker.internal:host-gateway -v pipelines:/app/pipelines --name pipelines --restart always --env-file .env ghcr.io/open-webui/pipelines:latest
docker run -d -p 9099:9099 --add-host=host.docker.internal:host-gateway -v pipelines:/app/pipelines --name pipelines --restart always --env-file .env pipelines #ghcr.io/open-webui/pipelines:latest
docker run -d -p 3000:8080 -v ~/.ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always -e OPENAI_API_BASE_URL=http://host.docker.internal:9099 -e OPENAI_API_KEY=0p3n-w3bu! ghcr.io/open-webui/open-webui:ollama

View File

@@ -5,7 +5,7 @@ date: 2024-06-06
version: 1.0
license: MIT
description: A filter pipeline that sends traces to DataDog.
requirements: git+https://github.com/DataDog/dd-trace-py.git@main
requirements: ddtrace
environment_variables: DD_LLMOBS_AGENTLESS_ENABLED, DD_LLMOBS_ENABLED, DD_LLMOBS_APP_NAME, DD_API_KEY, DD_SITE
"""
@@ -104,9 +104,6 @@ class Pipeline:
input_data = get_last_user_message(body["messages"]),
)
print("SPAN: ")
print(self.llm_span)
return body
@@ -114,9 +111,7 @@ class Pipeline:
print(f"outlet:{__name__}")
if body["chat_id"] not in self.chat_generations:
return body
print("SELF LLM SPAN")
print(self.llm_span)
#self.set_dd()
self.LLMObs.annotate(
span = self.llm_span,
output_data = get_last_assistant_message(body["messages"]),

View File

@@ -29,7 +29,8 @@ chromadb
# Observability
langfuse
git+https://github.com/DataDog/dd-trace-py.git@main
#git+https://github.com/DataDog/dd-trace-py.git@main
ddtrace
# ML libraries
torch