# open-webui/backend/open_webui/utils/response.py

import json
from open_webui.utils.misc import (
    openai_chat_chunk_message_template,
    openai_chat_completion_message_template,
)
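
# These helpers live in open_webui.utils.misc; going by their names (an
# assumption, since their definitions are not shown in this file), one builds
# a complete OpenAI "chat.completion" payload and the other a streaming
# "chat.completion.chunk" payload.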


def convert_response_ollama_to_openai(ollama_response: dict) -> dict:
    # Map a non-streaming Ollama chat response onto the OpenAI
    # chat completion format.
    model = ollama_response.get("model", "ollama")
    message_content = ollama_response.get("message", {}).get("content", "")
    response = openai_chat_completion_message_template(model, message_content)
    return response
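
# A minimal usage sketch; the payload below follows Ollama's /api/chat
# response shape as an assumption, since no example appears in this file:
#
#   ollama_response = {
#       "model": "llama3",
#       "message": {"role": "assistant", "content": "Hello!"},
#       "done": True,
#   }
#   openai_response = convert_response_ollama_to_openai(ollama_response)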


async def convert_streaming_response_ollama_to_openai(ollama_streaming_response):
    # Re-emit each Ollama streaming chunk as an OpenAI-style SSE "data:" line.
    async for data in ollama_streaming_response.body_iterator:
        data = json.loads(data)

        model = data.get("model", "ollama")
        message_content = data.get("message", {}).get("content", "")
        done = data.get("done", False)

        # Ollama marks its last chunk with done=True; pass None instead of
        # content there so the template can render the closing chunk.
        data = openai_chat_chunk_message_template(
            model, message_content if not done else None
        )

        line = f"data: {json.dumps(data)}\n\n"
        yield line

    yield "data: [DONE]\n\n"