mirror of https://github.com/open-webui/pipelines
synced 2025-05-14 17:35:45 +00:00

enh: langfuse

commit 418572bfaf (parent 62d0d138d2)
@@ -2,7 +2,7 @@
 title: Langfuse Filter Pipeline
 author: open-webui
 date: 2024-05-30
-version: 1.0
+version: 1.1
 license: MIT
 description: A filter pipeline that uses Langfuse.
 requirements: langfuse
@@ -12,13 +12,12 @@ from typing import List, Optional
 from schemas import OpenAIChatMessage
 import os
-
-
+from utils.pipelines.main import get_last_user_message, get_last_assistant_message
 from pydantic import BaseModel
 from langfuse import Langfuse
 
 
 class Pipeline:
 
     class Valves(BaseModel):
         # List target pipeline ids (models) that this filter will be connected to.
         # If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
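Note: the new import pulls get_last_user_message and get_last_assistant_message from utils.pipelines.main, a module that is not part of this diff. A minimal sketch of the behavior the outlet relies on, assuming each helper walks the OpenAI-style messages list from the end and returns the content of the most recent message with the matching role (an illustrative stand-in, not the actual utility code):

# Illustrative stand-in for the helpers imported from utils.pipelines.main.
# Assumption: they scan the messages list in reverse for the requested role.
from typing import List


def _last_message_by_role(messages: List[dict], role: str) -> str:
    for message in reversed(messages):
        if message.get("role") == role:
            return message.get("content", "")
    return ""


def get_last_user_message_sketch(messages: List[dict]) -> str:
    # e.g. what get_last_user_message(body["messages"]) returns in the outlet below
    return _last_message_by_role(messages, "user")


def get_last_assistant_message_sketch(messages: List[dict]) -> str:
    return _last_message_by_role(messages, "assistant")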
@@ -58,6 +57,7 @@ class Pipeline:
         )
 
         self.langfuse = None
+        self.chat_generations = {}
         pass
 
     async def on_startup(self):
@@ -98,21 +98,38 @@ class Pipeline:
             session_id=body["chat_id"],
         )
 
+        generation = trace.generation(
+            name=body["chat_id"],
+            model=body["model"],
+            input=body["messages"],
+            metadata={"interface": "open-webui"},
+        )
+
+        self.chat_generations[body["chat_id"]] = generation
         print(trace.get_trace_url())
 
         return body
 
     async def outlet(self, body: dict, user: Optional[dict] = None) -> dict:
         print(f"outlet:{__name__}")
+        if body["chat_id"] not in self.chat_generations:
+            return body
 
-        trace = self.langfuse.trace(
-            name=f"filter:{__name__}",
-            input=body,
-            user_id=user["id"],
-            metadata={"name": user["name"]},
-            session_id=body["chat_id"],
+        generation = self.chat_generations[body["chat_id"]]
+
+        user_message = get_last_user_message(body["messages"])
+        generated_message = get_last_assistant_message(body["messages"])
+
+        # Update usage cost based on the length of the input and output messages
+        # Below does not reflect the actual cost of the API
+        # You can adjust the cost based on your requirements
+        generation.end(
+            output=generated_message,
+            usage={
+                "totalCost": (len(user_message) + len(generated_message)) / 1000,
+                "unit": "CHARACTERS",
+            },
+            metadata={"interface": "open-webui"},
         )
 
-        print(trace.get_trace_url())
-
         return body
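Note: end to end, the change pairs each inlet call with its outlet through self.chat_generations, keyed by chat_id. The inlet opens a trace, attaches a generation, and stores it; the outlet looks the generation up, takes the last user and assistant messages, and ends it with a rough character-based cost ((user chars + assistant chars) / 1000), which, as the diff's own comments note, does not reflect the real API cost. A condensed, standalone sketch of that flow outside the Pipeline class follows; the credentials, host, user id, trace name, and example body are placeholders, and the Langfuse calls (langfuse.trace, trace.generation, generation.end) mirror the ones in the diff:

import os

from langfuse import Langfuse

# Placeholder credentials/host; the pipeline reads these from its Valves.
langfuse = Langfuse(
    secret_key=os.getenv("LANGFUSE_SECRET_KEY", "sk-..."),
    public_key=os.getenv("LANGFUSE_PUBLIC_KEY", "pk-..."),
    host=os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com"),
)

# chat_id -> generation, the same role self.chat_generations plays in the filter.
chat_generations = {}

# Hypothetical request body in the shape the inlet receives.
body = {
    "chat_id": "chat-123",
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello!"}],
}

# inlet: open a trace, attach a generation, and remember it by chat_id.
trace = langfuse.trace(
    name="filter:example",
    input=body,
    user_id="placeholder-user",
    session_id=body["chat_id"],
)
generation = trace.generation(
    name=body["chat_id"],
    model=body["model"],
    input=body["messages"],
    metadata={"interface": "open-webui"},
)
chat_generations[body["chat_id"]] = generation
print(trace.get_trace_url())

# outlet: once the assistant reply is in the messages, close the generation
# with the same character-based cost estimate used in the diff.
body["messages"].append({"role": "assistant", "content": "Hi there!"})
user_message = body["messages"][0]["content"]
generated_message = body["messages"][-1]["content"]
chat_generations[body["chat_id"]].end(
    output=generated_message,
    usage={
        "totalCost": (len(user_message) + len(generated_message)) / 1000,
        "unit": "CHARACTERS",
    },
    metadata={"interface": "open-webui"},
)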