mirror of https://github.com/open-webui/open-webui

refac
@@ -136,7 +136,7 @@ async def generate_chat_completion(
         response = await generate_ollama_chat_completion(
             request=request, form_data=form_data, user=user, bypass_filter=bypass_filter
         )
-        if form_data.stream:
+        if form_data.get("stream"):
             response.headers["content-type"] = "text/event-stream"
             return StreamingResponse(
                 convert_streaming_response_ollama_to_openai(response),
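The switch from `form_data.stream` to `form_data.get("stream")` suggests `form_data` is a plain dict at this point (presumably after the OpenAI-to-Ollama payload conversion), where attribute access would raise. A minimal sketch of the difference, with hypothetical values:

    # Hypothetical payload; attribute access on a dict raises, .get() does not.
    form_data = {"model": "llama3", "stream": True}

    try:
        form_data.stream
    except AttributeError as e:
        print(e)  # 'dict' object has no attribute 'stream'

    print(form_data.get("stream"))            # True
    print({"model": "llama3"}.get("stream"))  # None -> falsy, so no SSE headers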
@@ -106,7 +106,7 @@ def openai_chat_message_template(model: str):


 def openai_chat_chunk_message_template(
-    model: str, message: Optional[str] = None
+    model: str, message: Optional[str] = None, usage: Optional[dict] = None
 ) -> dict:
     template = openai_chat_message_template(model)
     template["object"] = "chat.completion.chunk"
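Because `usage` is appended with a `None` default, the signature change is backward compatible: existing call sites that pass only `model` and `message` keep working unchanged, and only the streaming converter below opts in. Hypothetical calls (the import path is an assumption for illustration, not taken from this diff):

    from open_webui.utils.misc import openai_chat_chunk_message_template  # assumed path

    chunk = openai_chat_chunk_message_template("llama3", "Hello")                   # old style still works
    final = openai_chat_chunk_message_template("llama3", None, {"eval_count": 42})  # new style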
@@ -114,17 +114,23 @@ def openai_chat_chunk_message_template(
         template["choices"][0]["delta"] = {"content": message}
     else:
         template["choices"][0]["finish_reason"] = "stop"
+
+    if usage:
+        template["usage"] = usage
     return template


 def openai_chat_completion_message_template(
-    model: str, message: Optional[str] = None
+    model: str, message: Optional[str] = None, usage: Optional[dict] = None
 ) -> dict:
     template = openai_chat_message_template(model)
     template["object"] = "chat.completion"
     if message is not None:
         template["choices"][0]["message"] = {"content": message, "role": "assistant"}
         template["choices"][0]["finish_reason"] = "stop"
+
+    if usage:
+        template["usage"] = usage
     return template


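To see the effect end to end, here is a self-contained sketch (not the repo's code; `openai_chat_message_template` is stubbed with a plausible minimal shape) showing how a final chunk carries `usage` while intermediate chunks do not:

    import time
    import uuid
    from typing import Optional


    def openai_chat_message_template(model: str) -> dict:
        # Stub with a plausible minimal shape; the real helper fills these fields too.
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "created": int(time.time()),
            "model": model,
            "choices": [{"index": 0, "logprobs": None, "finish_reason": None}],
        }


    def openai_chat_chunk_message_template(
        model: str, message: Optional[str] = None, usage: Optional[dict] = None
    ) -> dict:
        # Mirrors the logic in the hunk above.
        template = openai_chat_message_template(model)
        template["object"] = "chat.completion.chunk"
        if message:
            template["choices"][0]["delta"] = {"content": message}
        else:
            template["choices"][0]["finish_reason"] = "stop"

        if usage:
            template["usage"] = usage
        return template


    mid = openai_chat_chunk_message_template("llama3", "Hello")
    last = openai_chat_chunk_message_template("llama3", None, {"eval_count": 42})
    assert "usage" not in mid
    assert last["usage"] == {"eval_count": 42}
    assert last["choices"][0]["finish_reason"] == "stop"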
@@ -21,8 +21,63 @@ async def convert_streaming_response_ollama_to_openai(ollama_streaming_response)
         message_content = data.get("message", {}).get("content", "")
         done = data.get("done", False)

+        usage = None
+        if done:
+            usage = {
+                "response_token/s": (
+                    round(
+                        (
+                            (
+                                data.get("eval_count", 0)
+                                / ((data.get("eval_duration", 0) / 1_000_000_000))
+                            )
+                            * 100
+                        ),
+                        2,
+                    )
+                    if data.get("eval_duration", 0) > 0
+                    else "N/A"
+                ),
+                "prompt_token/s": (
+                    round(
+                        (
+                            (
+                                data.get("prompt_eval_count", 0)
+                                / (
+                                    (
+                                        data.get("prompt_eval_duration", 0)
+                                        / 1_000_000_000
+                                    )
+                                )
+                            )
+                            * 100
+                        ),
+                        2,
+                    )
+                    if data.get("prompt_eval_duration", 0) > 0
+                    else "N/A"
+                ),
+                "total_duration": round(
+                    ((data.get("total_duration", 0) / 1_000_000) * 100), 2
+                ),
+                "load_duration": round(
+                    ((data.get("load_duration", 0) / 1_000_000) * 100), 2
+                ),
+                "prompt_eval_count": data.get("prompt_eval_count", 0),
+                "prompt_eval_duration": round(
+                    ((data.get("prompt_eval_duration", 0) / 1_000_000) * 100), 2
+                ),
+                "eval_count": data.get("eval_count", 0),
+                "eval_duration": round(
+                    ((data.get("eval_duration", 0) / 1_000_000) * 100), 2
+                ),
+                "approximate_total": (
+                    lambda s: f"{s // 3600}h{(s % 3600) // 60}m{s % 60}s"
+                )((data.get("total_duration", 0) or 0) // 1_000_000_000),
+            }
+
         data = openai_chat_chunk_message_template(
-            model, message_content if not done else None
+            model, message_content if not done else None, usage
         )

         line = f"data: {json.dumps(data)}\n\n"
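A worked check of the token-rate arithmetic, assuming Ollama reports durations in nanoseconds (the numbers below are hypothetical). Note that `eval_count / (eval_duration / 1e9)` is already tokens per second, so the extra `* 100` factor in the hunk scales the result by 100; worth double-checking whether that is intended. The `approximate_total` lambda formats whole seconds as h/m/s:

    # Hypothetical final-chunk stats from Ollama (durations in nanoseconds).
    eval_count = 120
    eval_duration = 4_000_000_000  # 4 s

    tokens_per_s = eval_count / (eval_duration / 1_000_000_000)  # 30.0 tokens/s
    as_computed = round(tokens_per_s * 100, 2)                   # 3000.0 -- the hunk's extra *100

    # approximate_total: whole-second duration rendered as XhYmZs
    fmt = lambda s: f"{s // 3600}h{(s % 3600) // 60}m{s % 60}s"
    print(fmt(3_750_000_000_000 // 1_000_000_000))  # 3750 s -> "1h2m30s"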