Mirror of https://github.com/open-webui/open-webui
Merge pull request #725 from jnkstr/no-internet-whisper

fix: Whisper model download fails when the Docker container has no internet connection

commit 1def55cf09
Dockerfile

@@ -30,6 +30,10 @@ ENV WEBUI_SECRET_KEY ""
 ENV SCARF_NO_ANALYTICS true
 ENV DO_NOT_TRACK true

+#Whisper TTS Settings
+ENV WHISPER_MODEL="base"
+ENV WHISPER_MODEL_DIR="/app/backend/data/cache/whisper/models"
+
 WORKDIR /app/backend

 # install python dependencies
@@ -45,6 +49,8 @@ RUN apt-get update \
     && rm -rf /var/lib/apt/lists/*

 # RUN python -c "from sentence_transformers import SentenceTransformer; model = SentenceTransformer('all-MiniLM-L6-v2')"
+RUN python -c "import os; from faster_whisper import WhisperModel; WhisperModel(os.environ['WHISPER_MODEL'], device='cpu', compute_type='int8', download_root=os.environ['WHISPER_MODEL_DIR'])"
+

 # copy embedding weight from build
 RUN mkdir -p /root/.cache/chroma/onnx_models/all-MiniLM-L6-v2
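For readability, the new one-line RUN instruction can be sketched as the equivalent standalone Python script below. It assumes the same WHISPER_MODEL and WHISPER_MODEL_DIR environment variables that the Dockerfile sets above; instantiating the model at image build time downloads the faster-whisper weights into the cache directory, so the container no longer needs network access for them at runtime.

import os

from faster_whisper import WhisperModel

# Same values as the ENV defaults declared earlier in the Dockerfile.
model_name = os.environ.get("WHISPER_MODEL", "base")
model_dir = os.environ.get("WHISPER_MODEL_DIR", "/app/backend/data/cache/whisper/models")

# Constructing the model downloads its weights into download_root once,
# so later loads can be served entirely from disk.
WhisperModel(model_name, device="cpu", compute_type="int8", download_root=model_dir)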
backend/apps/audio/main.py

@@ -1,3 +1,4 @@
+import os
 from fastapi import (
     FastAPI,
     Request,
@@ -20,7 +21,7 @@ from utils.utils import (
 )
 from utils.misc import calculate_sha256

-from config import CACHE_DIR, UPLOAD_DIR, WHISPER_MODEL_NAME
+from config import CACHE_DIR, UPLOAD_DIR, WHISPER_MODEL, WHISPER_MODEL_DIR

 app = FastAPI()
 app.add_middleware(
@@ -53,12 +54,11 @@ def transcribe(
             f.write(contents)
             f.close()

-        model_name = WHISPER_MODEL_NAME
         model = WhisperModel(
-            model_name,
+            WHISPER_MODEL,
            device="cpu",
            compute_type="int8",
-            download_root=f"{CACHE_DIR}/whisper/models",
+            download_root=WHISPER_MODEL_DIR,
         )

         segments, info = model.transcribe(file_path, beam_size=5)
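As a minimal sketch of the updated code path, the snippet below loads the model the same way the new transcribe handler does, reading WHISPER_MODEL and WHISPER_MODEL_DIR from the environment. The hard-coded fallback directory and the "audio.wav" path are placeholders for this example only (in config.py the directory default is derived from CACHE_DIR).

import os

from faster_whisper import WhisperModel

WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base")
WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", "/app/backend/data/cache/whisper/models")

model = WhisperModel(
    WHISPER_MODEL,
    device="cpu",
    compute_type="int8",
    download_root=WHISPER_MODEL_DIR,  # already populated at image build time
)

# "audio.wav" is a placeholder path; transcribe() returns a lazy generator of segments.
segments, info = model.transcribe("audio.wav", beam_size=5)
print(f"Detected language: {info.language} (p={info.language_probability:.2f})")
print(" ".join(segment.text for segment in segments))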
backend/config.py

@@ -139,4 +139,6 @@ CHUNK_OVERLAP = 100
 ####################################
 # Transcribe
 ####################################
-WHISPER_MODEL_NAME = "base"
+
+WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base")
+WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", f"{CACHE_DIR}/whisper/models")
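Because both settings now come from os.getenv, they can be overridden per deployment without code changes. A small illustrative check (the "small" value here is just an example):

import os

# Example override, e.g. supplied at container start with `docker run -e WHISPER_MODEL=small ...`
os.environ["WHISPER_MODEL"] = "small"

WHISPER_MODEL = os.getenv("WHISPER_MODEL", "base")
WHISPER_MODEL_DIR = os.getenv("WHISPER_MODEL_DIR", "/app/backend/data/cache/whisper/models")

print(WHISPER_MODEL)      # "small", because the environment variable is set
print(WHISPER_MODEL_DIR)  # falls back to the default, since it is unset in this example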