From c1a97278a84dc7fc9e24d478d75f3f2a39ca0f59 Mon Sep 17 00:00:00 2001
From: "Timothy J. Baek"
Date: Tue, 21 May 2024 12:53:43 -0700
Subject: [PATCH] feat: deploy to hf spaces

---
 .github/workflows/deploy-to-hf-spaces.yml | 46 +++++++++++++++++++++++
 README.md                                 |  9 +++++
 backend/space/litellm_config.yaml         | 43 +++++++++++++++++++++
 backend/start.sh                          | 28 ++++++++++++++
 4 files changed, 126 insertions(+)
 create mode 100644 .github/workflows/deploy-to-hf-spaces.yml
 create mode 100644 backend/space/litellm_config.yaml

diff --git a/.github/workflows/deploy-to-hf-spaces.yml b/.github/workflows/deploy-to-hf-spaces.yml
new file mode 100644
index 000000000..4694d3ce2
--- /dev/null
+++ b/.github/workflows/deploy-to-hf-spaces.yml
@@ -0,0 +1,46 @@
+name: Deploy to HuggingFace Spaces
+
+on:
+  push:
+    branches:
+      - dev
+      - main
+  workflow_dispatch:
+
+jobs:
+  check-secret:
+    runs-on: ubuntu-latest
+    outputs:
+      token-set: ${{ steps.check-key.outputs.defined }}
+    steps:
+      - id: check-key
+        env:
+          HF_TOKEN: ${{ secrets.HF_TOKEN }}
+        if: "${{ env.HF_TOKEN != '' }}"
+        run: echo "defined=true" >> "$GITHUB_OUTPUT"
+
+  deploy:
+    runs-on: ubuntu-latest
+    needs: [check-secret]
+    if: needs.check-secret.outputs.token-set == 'true'
+    env:
+      HF_TOKEN: ${{ secrets.HF_TOKEN }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+
+      - name: Remove git history
+        run: rm -rf .git
+
+      - name: Configure git
+        run: |
+          git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
+          git config --global user.name "github-actions[bot]"
+      - name: Set up Git and push to Space
+        run: |
+          git init --initial-branch=main
+          git lfs track "*.ttf"
+          rm -f demo.gif
+          git add .
+          git commit -m "GitHub deploy: ${{ github.sha }}"
+          git push --force https://open-webui:${HF_TOKEN}@huggingface.co/spaces/open-webui/open-webui main
diff --git a/README.md b/README.md
index a40018f0a..5d9fbc552 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,12 @@
+---
+title: Open WebUI
+emoji: 🐳
+colorFrom: purple
+colorTo: gray
+sdk: docker
+app_port: 8080
+---
+
 # Open WebUI (Formerly Ollama WebUI) 👋
 
 ![GitHub stars](https://img.shields.io/github/stars/open-webui/open-webui?style=social)
diff --git a/backend/space/litellm_config.yaml b/backend/space/litellm_config.yaml
new file mode 100644
index 000000000..af4f880b9
--- /dev/null
+++ b/backend/space/litellm_config.yaml
@@ -0,0 +1,43 @@
+litellm_settings:
+  drop_params: true
+model_list:
+  - model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.1'
+    litellm_params:
+      model: huggingface/mistralai/Mistral-7B-Instruct-v0.1
+      api_key: os.environ/HF_TOKEN
+      max_tokens: 1024
+  - model_name: 'HuggingFace: Mistral: Mistral 7B Instruct v0.2'
+    litellm_params:
+      model: huggingface/mistralai/Mistral-7B-Instruct-v0.2
+      api_key: os.environ/HF_TOKEN
+      max_tokens: 1024
+  - model_name: 'HuggingFace: Meta: Llama 3 8B Instruct'
+    litellm_params:
+      model: huggingface/meta-llama/Meta-Llama-3-8B-Instruct
+      api_key: os.environ/HF_TOKEN
+      max_tokens: 2047
+  - model_name: 'HuggingFace: Mistral: Mixtral 8x7B Instruct v0.1'
+    litellm_params:
+      model: huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1
+      api_key: os.environ/HF_TOKEN
+      max_tokens: 8192
+  - model_name: 'HuggingFace: Microsoft: Phi-3 Mini-4K-Instruct'
+    litellm_params:
+      model: huggingface/microsoft/Phi-3-mini-4k-instruct
+      api_key: os.environ/HF_TOKEN
+      max_tokens: 1024
+  - model_name: 'HuggingFace: Google: Gemma 7B 1.1'
+    litellm_params:
+      model: huggingface/google/gemma-1.1-7b-it
+      api_key: os.environ/HF_TOKEN
+      max_tokens: 1024
+  - model_name: 'HuggingFace: Yi-1.5 34B Chat'
+    litellm_params:
+      model: huggingface/01-ai/Yi-1.5-34B-Chat
+      api_key: os.environ/HF_TOKEN
+      max_tokens: 1024
+  - model_name: 'HuggingFace: Nous Research: Nous Hermes 2 Mixtral 8x7B DPO'
+    litellm_params:
+      model: huggingface/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO
+      api_key: os.environ/HF_TOKEN
+      max_tokens: 2048
diff --git a/backend/start.sh b/backend/start.sh
index 9b3411f01..209f3e31b 100755
--- a/backend/start.sh
+++ b/backend/start.sh
@@ -30,4 +30,32 @@ if [ "$USE_CUDA_DOCKER" = "true" ]; then
     export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/python3.11/site-packages/torch/lib:/usr/local/lib/python3.11/site-packages/nvidia/cudnn/lib"
 fi
+
+# HFSPACE:START
+# Check if SPACE_ID is set, if so, configure for space
+if [ -n "$SPACE_ID" ]; then
+  echo "Configuring for HuggingFace Space deployment"
+
+  # Copy the Space LiteLLM config into the data dir (path assumes repo-root cwd — verify)
+  echo "Copying litellm_config.yaml to ./data/litellm/config.yaml..."
+  cp ./backend/space/litellm_config.yaml ./data/litellm/config.yaml
+
+  WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*' &
+  webui_pid=$!
+  echo "Waiting for webui to start..."
+  while ! curl -s "http://localhost:$PORT/health" > /dev/null; do
+    sleep 1
+  done
+  echo "Creating admin user..."
+  curl \
+    -X POST "http://localhost:$PORT/api/v1/auths/signup" \
+    -H "accept: application/json" \
+    -H "Content-Type: application/json" \
+    -d "{ \"email\": \"${ADMIN_USER_EMAIL}\", \"password\": \"${ADMIN_USER_PASSWORD}\", \"name\": \"Admin\" }"
+  echo "Shutting down webui..."
+  kill "$webui_pid"
+  export WEBUI_URL="${SPACE_HOST}"
+fi
+# HFSPACE:END
+
 
 WEBUI_SECRET_KEY="$WEBUI_SECRET_KEY" exec uvicorn main:app --host "$HOST" --port "$PORT" --forwarded-allow-ips '*'