From 5d6517c537338723d360461768c61010c836de72 Mon Sep 17 00:00:00 2001
From: Yanyutin753 <132346501+Yanyutin753@users.noreply.github.com>
Date: Tue, 7 May 2024 08:28:34 +0800
Subject: [PATCH] rename audio OPENAI_API_SPEAKER setting to OPENAI_API_VOICE and OpenAIModel to model
---
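Note: this patch renames the audio TTS voice setting OPENAI_API_SPEAKER to
OPENAI_API_VOICE (backend env var AUDIO_OPENAI_API_VOICE, default "alloy") and the
frontend "OpenAIModel" setting/parameter to "model". A minimal TypeScript usage sketch
of the renamed parameter follows, assuming only the synthesizeOpenAISpeech signature
shown in the src/lib/apis/audio/index.ts hunk below (token, speaker, text, model) and
the standard SvelteKit $lib alias for src/lib; the call-site values are illustrative.

import { synthesizeOpenAISpeech } from '$lib/apis/audio';

// speaker picks the OpenAI voice ("alloy" stays the default after this patch);
// the last argument is the renamed model parameter (previously OpenAIModel).
const res = await synthesizeOpenAISpeech(
	localStorage.token,
	'alloy',
	'Testing the renamed audio settings.',
	'tts-1'
);
// The patch does not show the response shape, so handling of `res` is left out here.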
backend/apps/audio/main.py | 10 +++----
backend/config.py | 2 +-
src/lib/apis/audio/index.ts | 4 +--
.../chat/Messages/ResponseMessage.svelte | 2 +-
src/lib/components/chat/Settings/Audio.svelte | 26 +++++++++----------
src/lib/stores/index.ts | 2 +-
6 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/backend/apps/audio/main.py b/backend/apps/audio/main.py
index be605388c..87732d7bc 100644
--- a/backend/apps/audio/main.py
+++ b/backend/apps/audio/main.py
@@ -44,7 +44,7 @@ from config import (
AUDIO_OPENAI_API_BASE_URL,
AUDIO_OPENAI_API_KEY,
AUDIO_OPENAI_API_MODEL,
- AUDIO_OPENAI_API_SPEAKER,
+ AUDIO_OPENAI_API_VOICE,
)
log = logging.getLogger(__name__)
@@ -63,7 +63,7 @@ app.add_middleware(
app.state.OPENAI_API_BASE_URL = AUDIO_OPENAI_API_BASE_URL
app.state.OPENAI_API_KEY = AUDIO_OPENAI_API_KEY
app.state.OPENAI_API_MODEL = AUDIO_OPENAI_API_MODEL
-app.state.OPENAI_API_SPEAKER = AUDIO_OPENAI_API_SPEAKER
+app.state.OPENAI_API_VOICE = AUDIO_OPENAI_API_VOICE
# setting device type for whisper model
whisper_device_type = DEVICE_TYPE if DEVICE_TYPE and DEVICE_TYPE == "cuda" else "cpu"
@@ -86,7 +86,7 @@ async def get_openai_config(user=Depends(get_admin_user)):
"OPENAI_API_BASE_URL": app.state.OPENAI_API_BASE_URL,
"OPENAI_API_KEY": app.state.OPENAI_API_KEY,
"OPENAI_API_MODEL": app.state.OPENAI_API_MODEL,
- "OPENAI_API_SPEAKER": app.state.OPENAI_API_SPEAKER,
+ "OPENAI_API_VOICE": app.state.OPENAI_API_VOICE,
}
@@ -100,14 +100,14 @@ async def update_openai_config(
app.state.OPENAI_API_BASE_URL = form_data.url
app.state.OPENAI_API_KEY = form_data.key
app.state.OPENAI_API_MODEL = form_data.model
- app.state.OPENAI_API_SPEAKER = form_data.speaker
+ app.state.OPENAI_API_VOICE = form_data.speaker
return {
"status": True,
"OPENAI_API_BASE_URL": app.state.OPENAI_API_BASE_URL,
"OPENAI_API_KEY": app.state.OPENAI_API_KEY,
"OPENAI_API_MODEL": app.state.OPENAI_API_MODEL,
- "OPENAI_API_SPEAKER": app.state.OPENAI_API_SPEAKER,
+ "OPENAI_API_VOICE": app.state.OPENAI_API_VOICE,
}
diff --git a/backend/config.py b/backend/config.py
index 02352a092..a6dc83ffa 100644
--- a/backend/config.py
+++ b/backend/config.py
@@ -575,7 +575,7 @@ IMAGE_GENERATION_MODEL = os.getenv("IMAGE_GENERATION_MODEL", "")
AUDIO_OPENAI_API_BASE_URL = os.getenv("AUDIO_OPENAI_API_BASE_URL", OPENAI_API_BASE_URL)
AUDIO_OPENAI_API_KEY = os.getenv("AUDIO_OPENAI_API_KEY", OPENAI_API_KEY)
AUDIO_OPENAI_API_MODEL = os.getenv("AUDIO_OPENAI_API_MODEL", "tts-1")
-AUDIO_OPENAI_API_SPEAKER = os.getenv("AUDIO_OPENAI_API_SPEAKER", "alloy")
+AUDIO_OPENAI_API_VOICE = os.getenv("AUDIO_OPENAI_API_VOICE", "alloy")
####################################
# LiteLLM
diff --git a/src/lib/apis/audio/index.ts b/src/lib/apis/audio/index.ts
index 3b716d58e..7bd8981fe 100644
--- a/src/lib/apis/audio/index.ts
+++ b/src/lib/apis/audio/index.ts
@@ -98,7 +98,7 @@ export const synthesizeOpenAISpeech = async (
token: string = '',
speaker: string = 'alloy',
text: string = '',
- OpenAIModel: string = 'tts-1'
+ model: string = 'tts-1'
) => {
let error = null;
@@ -109,7 +109,7 @@ export const synthesizeOpenAISpeech = async (
'Content-Type': 'application/json'
},
body: JSON.stringify({
- model: OpenAIModel,
+ model: model,
input: text,
voice: speaker
})
diff --git a/src/lib/components/chat/Messages/ResponseMessage.svelte b/src/lib/components/chat/Messages/ResponseMessage.svelte
index 73c56d31a..67b6e3a34 100644
--- a/src/lib/components/chat/Messages/ResponseMessage.svelte
+++ b/src/lib/components/chat/Messages/ResponseMessage.svelte
@@ -224,7 +224,7 @@
localStorage.token,
$settings?.audio?.speaker,
sentence,
- $settings?.audio?.OpenAIModel
+ $settings?.audio?.model
).catch((error) => {
toast.error(error);
diff --git a/src/lib/components/chat/Settings/Audio.svelte b/src/lib/components/chat/Settings/Audio.svelte
index a3c453a17..a7b8ec11a 100644
--- a/src/lib/components/chat/Settings/Audio.svelte
+++ b/src/lib/components/chat/Settings/Audio.svelte
@@ -27,7 +27,7 @@
let voices = [];
let speaker = '';
let models = [];
- let OpenAIModel = '';
+ let model = '';
const getOpenAIVoices = () => {
voices = [
@@ -85,15 +85,15 @@
const res = await updateAudioConfig(localStorage.token, {
url: OpenAIUrl,
key: OpenAIKey,
- model: OpenAIModel,
- speaker: speaker,
+ model: model,
+ speaker: speaker
});
if (res) {
OpenAIUrl = res.OPENAI_API_BASE_URL;
OpenAIKey = res.OPENAI_API_KEY;
- OpenAIModel = res.OPENAI_API_MODEL;
- speaker = res.OPENAI_API_SPEAKER;
+ model = res.OPENAI_API_MODEL;
+ speaker = res.OPENAI_API_VOICE;
}
}
};
@@ -108,7 +108,7 @@
STTEngine = settings?.audio?.STTEngine ?? '';
TTSEngine = settings?.audio?.TTSEngine ?? '';
speaker = settings?.audio?.speaker ?? '';
- OpenAIModel = settings?.audio?.OpenAIModel ?? '';
+ model = settings?.audio?.model ?? '';
if (TTSEngine === 'openai') {
getOpenAIVoices();
@@ -123,8 +123,8 @@
if (res) {
OpenAIUrl = res.OPENAI_API_BASE_URL;
OpenAIKey = res.OPENAI_API_KEY;
- OpenAIModel = res.OPENAI_API_MODEL;
- speaker = res.OPENAI_API_SPEAKER;
+ model = res.OPENAI_API_MODEL;
+ speaker = res.OPENAI_API_VOICE;
}
}
});
@@ -141,7 +141,7 @@
STTEngine: STTEngine !== '' ? STTEngine : undefined,
TTSEngine: TTSEngine !== '' ? TTSEngine : undefined,
speaker: speaker !== '' ? speaker : undefined,
- OpenAIModel: OpenAIModel !== '' ? OpenAIModel : undefined
+ model: model !== '' ? model : undefined
}
});
dispatch('save');
@@ -230,7 +230,7 @@
if (e.target.value === 'openai') {
getOpenAIVoices();
speaker = 'alloy';
- OpenAIModel = 'tts-1';
+ model = 'tts-1';
} else {
getWebAPIVoices();
speaker = '';
@@ -330,13 +330,13 @@
diff --git a/src/lib/stores/index.ts b/src/lib/stores/index.ts
index 32c738ae3..c4ccb5eec 100644
--- a/src/lib/stores/index.ts
+++ b/src/lib/stores/index.ts
@@ -102,7 +102,7 @@ type AudioSettings = {
STTEngine?: string;
TTSEngine?: string;
speaker?: string;
- OpenAIModel?: string;
+ model?: string;
};
type TitleSettings = {