Mirror of https://github.com/open-webui/pipelines, synced 2025-05-10 23:50:45 +00:00
commit 68938c0808
parent 0b99d06f23

    fix
@@ -9,15 +9,20 @@ requirements: langfuse
 """
 
 from typing import List, Optional
 from schemas import OpenAIChatMessage
 import os
 import uuid
 
-from utils.pipelines.main import get_last_user_message, get_last_assistant_message
+from utils.pipelines.main import get_last_assistant_message
 from pydantic import BaseModel
 from langfuse import Langfuse
 from langfuse.api.resources.commons.errors.unauthorized_error import UnauthorizedError
 
+
+def get_last_assistant_message_obj(messages: List[dict]) -> dict:
+    for message in reversed(messages):
+        if message["role"] == "assistant":
+            return message
+    return {}
 
 
 class Pipeline:
     class Valves(BaseModel):
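Unlike get_last_assistant_message, which returns only the message content, the new helper returns the whole message dict, so callers can reach the metadata stored under its "info" key. A minimal sketch of its behavior; the sample chat history below is invented for illustration, with an "info" dict shaped like the metadata an Ollama-backed response carries:

from typing import List


def get_last_assistant_message_obj(messages: List[dict]) -> dict:
    # Walk the history backwards; return the first assistant turn as a whole dict.
    for message in reversed(messages):
        if message["role"] == "assistant":
            return message
    return {}


# Hypothetical chat history for illustration only.
messages = [
    {"role": "user", "content": "Hi"},
    {
        "role": "assistant",
        "content": "Hello!",
        "info": {"prompt_eval_count": 5, "eval_count": 12},
    },
]

assert get_last_assistant_message_obj(messages) is messages[-1]
assert get_last_assistant_message_obj(messages[:1]) == {}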
@@ -109,21 +114,28 @@ class Pipeline:
 
     async def outlet(self, body: dict, user: Optional[dict] = None) -> dict:
         print(f"outlet:{__name__}")
         print(f"Received body: {body}")
         if body["chat_id"] not in self.chat_generations:
             return body
 
         generation = self.chat_generations[body["chat_id"]]
         assistant_message = get_last_assistant_message(body["messages"])
 
-        # Extract usage information
-        info = assistant_message.get("info", {})
-
         # Extract usage information for models that support it
         usage = None
-        if "prompt_tokens" in info and "completion_tokens" in info:
-            usage = {
-                "input": info["prompt_tokens"],
-                "output": info["completion_tokens"],
-                "unit": "TOKENS",
-            }
+        assistant_message_obj = get_last_assistant_message_obj(body["messages"])
+        if assistant_message_obj:
+            info = assistant_message_obj.get("info", {})
+            if isinstance(info, dict):
+                prompt_eval_count = info.get("prompt_eval_count")
+                eval_count = info.get("eval_count")
+                if prompt_eval_count is not None and eval_count is not None:
+                    usage = {
+                        "input": prompt_eval_count,
+                        "output": eval_count,
+                        "unit": "TOKENS",
+                    }
 
         # Update generation
         generation.end(
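The net effect of this hunk: token usage is now read from the message object's "info" dict using Ollama's field names (prompt_eval_count / eval_count) instead of the OpenAI-style prompt_tokens / completion_tokens the old branch looked for. A sketch of the new extraction step factored into a standalone helper; extract_usage is hypothetical and not part of the commit, and the usage dict's shape is taken verbatim from the diff:

from typing import Optional


def extract_usage(info: dict) -> Optional[dict]:
    # Hypothetical stand-alone version of the outlet's new usage step.
    if not isinstance(info, dict):
        return None
    prompt_eval_count = info.get("prompt_eval_count")
    eval_count = info.get("eval_count")
    if prompt_eval_count is None or eval_count is None:
        return None
    # Input/output token counts plus a unit label, as passed to generation.end().
    return {"input": prompt_eval_count, "output": eval_count, "unit": "TOKENS"}


print(extract_usage({"prompt_eval_count": 26, "eval_count": 282}))
# {'input': 26, 'output': 282, 'unit': 'TOKENS'}
print(extract_usage({"prompt_tokens": 26, "completion_tokens": 282}))
# None: OpenAI-style fields are no longer consulted on this path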