open-webui/backend/open_webui/utils/response.py

import json

from open_webui.utils.misc import (
    openai_chat_chunk_message_template,
    openai_chat_completion_message_template,
)


def convert_response_ollama_to_openai(ollama_response: dict) -> dict:
    model = ollama_response.get("model", "ollama")
    message_content = ollama_response.get("message", {}).get("content", "")

    response = openai_chat_completion_message_template(model, message_content)
    return response
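
# Hypothetical usage: the payload below is an illustrative Ollama /api/chat
# response, not captured from a real server; the output shape is whatever
# openai_chat_completion_message_template produces (an OpenAI-style
# "chat.completion" dict).
#
#   convert_response_ollama_to_openai(
#       {"model": "llama3", "message": {"role": "assistant", "content": "Hi!"}}
#   )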


async def convert_streaming_response_ollama_to_openai(ollama_streaming_response):
    async for data in ollama_streaming_response.body_iterator:
        data = json.loads(data)

        model = data.get("model", "ollama")
        message_content = data.get("message", {}).get("content", "")
        done = data.get("done", False)

        usage = None
        if done:
            # Ollama reports all durations in nanoseconds, so
            # tokens/s = count / (duration_ns / 1e9). Guard against the
            # zero fallback before dividing.
            usage = {
                "response_token/s": (
                    round(
                        data.get("eval_count", 0)
                        / (data.get("eval_duration", 0) / 1_000_000_000),
                        2,
                    )
                    if data.get("eval_duration", 0) > 0
                    else "N/A"
                ),
                "prompt_token/s": (
                    round(
                        data.get("prompt_eval_count", 0)
                        / (data.get("prompt_eval_duration", 0) / 1_000_000_000),
                        2,
                    )
                    if data.get("prompt_eval_duration", 0) > 0
                    else "N/A"
                ),
                "total_duration": data.get("total_duration", 0),
                "load_duration": data.get("load_duration", 0),
                "prompt_eval_count": data.get("prompt_eval_count", 0),
                "prompt_eval_duration": data.get("prompt_eval_duration", 0),
                "eval_count": data.get("eval_count", 0),
                "eval_duration": data.get("eval_duration", 0),
                # Render total_duration (ns) as an h/m/s string, e.g. "0h0m42s".
                "approximate_total": (
                    lambda s: f"{s // 3600}h{(s % 3600) // 60}m{s % 60}s"
                )((data.get("total_duration", 0) or 0) // 1_000_000_000),
            }
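
            # Worked example (hypothetical numbers): eval_count=120 tokens over
            # eval_duration=2_000_000_000 ns (2 s) gives 120 / 2.0 = 60.0,
            # so the final chunk reports "response_token/s": 60.0.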

        data = openai_chat_chunk_message_template(
            model, message_content if not done else None, usage
        )

        # Frame each chunk as a server-sent event: "data: <json>\n\n".
        line = f"data: {json.dumps(data)}\n\n"
        yield line

    # OpenAI-compatible streams terminate with a literal [DONE] sentinel.
    yield "data: [DONE]\n\n"