This commit is contained in:
Jannik Streidl
2024-06-05 20:54:11 +02:00
40 changed files with 991 additions and 569 deletions

View File

@@ -6,32 +6,33 @@ import time
class Pipeline:
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Valves for conversation turn limiting
target_user_roles: List[str] = ["user"]
max_turns: Optional[int] = None
def __init__(self):
# Pipeline filters are only compatible with Open WebUI
# You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API.
self.type = "filter"
# Assign a unique identifier to the pipeline.
# Optionally, you can set the id and name of the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "conversation_turn_limit_filter_pipeline"
# self.id = "conversation_turn_limit_filter_pipeline"
self.name = "Conversation Turn Limit Filter"
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Valves for conversation turn limiting
target_user_roles: List[str] = ["user"]
max_turns: Optional[int] = None
self.valves = Valves(
self.valves = self.Valves(
**{
"pipelines": os.getenv("CONVERSATION_TURN_PIPELINES", "*").split(","),
"max_turns": 10,

View File

@@ -16,31 +16,31 @@ import os
class Pipeline:
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
# e.g. ["llama3:latest", "gpt-3.5-turbo"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
def __init__(self):
# Pipeline filters are only compatible with Open WebUI
# You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API.
self.type = "filter"
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "detoxify_filter_pipeline"
# self.id = "detoxify_filter_pipeline"
self.name = "Detoxify Filter"
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
# e.g. ["llama3:latest", "gpt-3.5-turbo"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Initialize
self.valves = Valves(
self.valves = self.Valves(
**{
"pipelines": ["*"], # Connect to all pipelines
}

View File

@@ -0,0 +1,100 @@
import os
import requests
from typing import Literal, List, Optional
from datetime import datetime
from blueprints.function_calling_blueprint import Pipeline as FunctionCallingBlueprint
class Pipeline(FunctionCallingBlueprint):
    """Example function-calling pipeline exposing time, weather and calculator tools."""

    class Valves(FunctionCallingBlueprint.Valves):
        # Custom parameters layered on top of the blueprint's valves.
        OPENWEATHERMAP_API_KEY: str = ""
        pass

    class Tools:
        def __init__(self, pipeline) -> None:
            # Back-reference so tools can read the pipeline's valves.
            self.pipeline = pipeline

        def get_current_time(
            self,
        ) -> str:
            """
            Get the current time.

            :return: The current time.
            """
            current_time = datetime.now().strftime("%H:%M:%S")
            return f"Current Time = {current_time}"

        def get_current_weather(
            self,
            location: str,
            unit: Literal["metric", "fahrenheit"] = "fahrenheit",
        ) -> str:
            """
            Get the current weather for a location. If the location is not found, return an empty string.

            :param location: The location to get the weather for.
            :param unit: The unit to get the weather in. Default is fahrenheit.
            :return: The current weather for the location.
            """
            # https://openweathermap.org/api
            api_key = self.pipeline.valves.OPENWEATHERMAP_API_KEY
            if api_key == "":
                return "OpenWeatherMap API Key not set, ask the user to set it up."

            query = {
                "q": location,
                "appid": api_key,
                "units": "imperial" if unit == "fahrenheit" else "metric",
            }
            resp = requests.get(
                "http://api.openweathermap.org/data/2.5/weather", params=query
            )
            resp.raise_for_status()  # Raises an HTTPError for bad responses
            payload = resp.json()
            weather_description = payload["weather"][0]["description"]
            temperature = payload["main"]["temp"]
            return f"{location}: {weather_description.capitalize()}, {temperature}°{unit.capitalize()[0]}"

        def calculator(self, equation: str) -> str:
            """
            Calculate the result of an equation.

            :param equation: The equation to calculate.
            """
            # Avoid using eval in production code
            # https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html
            try:
                outcome = eval(equation)
            except Exception as exc:
                print(exc)
                return "Invalid equation"
            return f"{equation} = {outcome}"

    def __init__(self):
        super().__init__()
        # Best practice is to not specify the id so that it can be automatically
        # inferred from the filename; users can then install multiple versions.
        # self.id = "my_tools_pipeline"
        self.name = "My Tools Pipeline"
        self.valves = self.Valves(
            **{
                **self.valves.model_dump(),
                "pipelines": ["*"],  # Connect to all pipelines
                "OPENWEATHERMAP_API_KEY": os.getenv("OPENWEATHERMAP_API_KEY", ""),
            },
        )
        self.tools = self.Tools(self)

View File

@@ -0,0 +1,135 @@
"""
title: Langfuse Filter Pipeline
author: open-webui
date: 2024-05-30
version: 1.1
license: MIT
description: A filter pipeline that uses Langfuse.
requirements: langfuse
"""
from typing import List, Optional
from schemas import OpenAIChatMessage
import os
from utils.pipelines.main import get_last_user_message, get_last_assistant_message
from pydantic import BaseModel
from langfuse import Langfuse
class Pipeline:
    """Filter pipeline that records each chat turn as a Langfuse trace/generation."""

    class Valves(BaseModel):
        # List target pipeline ids (models) that this filter will be connected to.
        # If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
        # e.g. ["llama3:latest", "gpt-3.5-turbo"]
        pipelines: List[str] = []
        # Assign a priority level to the filter pipeline.
        # The priority level determines the order in which the filter pipelines are executed.
        # The lower the number, the higher the priority.
        priority: int = 0
        # Langfuse credentials / endpoint.
        secret_key: str
        public_key: str
        host: str

    def __init__(self):
        # Pipeline filters are only compatible with Open WebUI
        # You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API.
        self.type = "filter"
        # Optionally, you can set the id and name of the pipeline.
        # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
        # self.id = "langfuse_filter_pipeline"
        self.name = "Langfuse Filter"
        self.valves = self.Valves(
            **{
                "pipelines": ["*"],  # Connect to all pipelines
                "secret_key": os.getenv("LANGFUSE_SECRET_KEY", "your-secret-key-here"),
                "public_key": os.getenv("LANGFUSE_PUBLIC_KEY", "your-public-key-here"),
                "host": os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com"),
            }
        )
        # Client is built lazily in set_langfuse(); chat_generations maps
        # chat_id -> the open generation created in inlet().
        self.langfuse = None
        self.chat_generations = {}

    async def on_startup(self):
        # This function is called when the server is started.
        print(f"on_startup:{__name__}")
        self.set_langfuse()

    async def on_shutdown(self):
        # This function is called when the server is stopped.
        print(f"on_shutdown:{__name__}")
        # Guard: set_langfuse() may never have run (startup failure / bad creds),
        # in which case self.langfuse is still None and flush() would crash.
        if self.langfuse is not None:
            self.langfuse.flush()

    async def on_valves_updated(self):
        # This function is called when the valves are updated;
        # rebuild the client so new credentials take effect.
        self.set_langfuse()

    def set_langfuse(self):
        """(Re)create the Langfuse client from the current valves and verify auth."""
        self.langfuse = Langfuse(
            secret_key=self.valves.secret_key,
            public_key=self.valves.public_key,
            host=self.valves.host,
            debug=False,
        )
        self.langfuse.auth_check()

    async def inlet(self, body: dict, user: Optional[dict] = None) -> dict:
        """Open a trace and a generation for this chat turn; returns body unchanged."""
        print(f"inlet:{__name__}")
        trace = self.langfuse.trace(
            name=f"filter:{__name__}",
            input=body,
            # user is Optional — don't crash when no user info is attached.
            user_id=user["id"] if user else None,
            metadata={"name": user["name"]} if user else None,
            session_id=body["chat_id"],
        )
        generation = trace.generation(
            name=body["chat_id"],
            model=body["model"],
            input=body["messages"],
            metadata={"interface": "open-webui"},
        )
        self.chat_generations[body["chat_id"]] = generation
        print(trace.get_trace_url())
        return body

    async def outlet(self, body: dict, user: Optional[dict] = None) -> dict:
        """Close the generation opened in inlet() and record character-based usage."""
        print(f"outlet:{__name__}")
        if body["chat_id"] not in self.chat_generations:
            return body
        # pop() so finished chats do not accumulate in memory forever.
        generation = self.chat_generations.pop(body["chat_id"])

        user_message = get_last_user_message(body["messages"])
        generated_message = get_last_assistant_message(body["messages"])

        # Update usage cost based on the length of the input and output messages.
        # Below does not reflect the actual cost of the API;
        # you can adjust the cost based on your requirements.
        generation.end(
            output=generated_message,
            usage={
                "totalCost": (len(user_message) + len(generated_message)) / 1000,
                "unit": "CHARACTERS",
            },
            metadata={"interface": "open-webui"},
        )
        return body

View File

@@ -4,48 +4,49 @@ from pydantic import BaseModel
import requests
import os
from utils.main import get_last_user_message, get_last_assistant_message
from utils.pipelines.main import get_last_user_message, get_last_assistant_message
class Pipeline:
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
# e.g. ["llama3:latest", "gpt-3.5-turbo"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Valves
libretranslate_url: str
# Source and target languages
# User message will be translated from source_user to target_user
source_user: Optional[str] = "auto"
target_user: Optional[str] = "en"
# Assistant languages
# Assistant message will be translated from source_assistant to target_assistant
source_assistant: Optional[str] = "en"
target_assistant: Optional[str] = "es"
def __init__(self):
# Pipeline filters are only compatible with Open WebUI
# You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API.
self.type = "filter"
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "libretranslate_filter_pipeline"
# self.id = "libretranslate_filter_pipeline"
self.name = "LibreTranslate Filter"
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
# e.g. ["llama3:latest", "gpt-3.5-turbo"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Valves
libretranslate_url: str
# Source and target languages
# User message will be translated from source_user to target_user
source_user: Optional[str] = "auto"
target_user: Optional[str] = "en"
# Assistant languages
# Assistant message will be translated from source_assistant to target_assistant
source_assistant: Optional[str] = "en"
target_assistant: Optional[str] = "es"
# Initialize
self.valves = Valves(
self.valves = self.Valves(
**{
"pipelines": ["*"], # Connect to all pipelines
"libretranslate_url": os.getenv(

View File

@@ -4,48 +4,52 @@ from pydantic import BaseModel
from schemas import OpenAIChatMessage
import time
class Pipeline:
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Valves for rate limiting
requests_per_minute: Optional[int] = None
requests_per_hour: Optional[int] = None
sliding_window_limit: Optional[int] = None
sliding_window_minutes: Optional[int] = None
def __init__(self):
# Pipeline filters are only compatible with Open WebUI
# You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API.
self.type = "filter"
# Assign a unique identifier to the pipeline.
# Optionally, you can set the id and name of the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "rate_limit_filter_pipeline"
# self.id = "rate_limit_filter_pipeline"
self.name = "Rate Limit Filter"
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Valves for rate limiting
requests_per_minute: Optional[int] = None
requests_per_hour: Optional[int] = None
sliding_window_limit: Optional[int] = None
sliding_window_minutes: Optional[int] = None
# Initialize rate limits
pipelines = os.getenv("RATE_LIMIT_PIPELINES", "*").split(",")
requests_per_minute = int(os.getenv("RATE_LIMIT_REQUESTS_PER_MINUTE", 10))
requests_per_hour = int(os.getenv("RATE_LIMIT_REQUESTS_PER_HOUR", 1000))
sliding_window_limit = int(os.getenv("RATE_LIMIT_SLIDING_WINDOW_LIMIT", 100))
sliding_window_minutes = int(os.getenv("RATE_LIMIT_SLIDING_WINDOW_MINUTES", 15))
self.valves = Valves(
self.valves = self.Valves(
**{
"pipelines": pipelines,
"requests_per_minute": requests_per_minute,
"requests_per_hour": requests_per_hour,
"sliding_window_limit": sliding_window_limit,
"sliding_window_minutes": sliding_window_minutes,
"pipelines": os.getenv("RATE_LIMIT_PIPELINES", "*").split(","),
"requests_per_minute": int(
os.getenv("RATE_LIMIT_REQUESTS_PER_MINUTE", 10)
),
"requests_per_hour": int(
os.getenv("RATE_LIMIT_REQUESTS_PER_HOUR", 1000)
),
"sliding_window_limit": int(
os.getenv("RATE_LIMIT_SLIDING_WINDOW_LIMIT", 100)
),
"sliding_window_minutes": int(
os.getenv("RATE_LIMIT_SLIDING_WINDOW_MINUTES", 15)
),
}
)

View File

@@ -1,231 +0,0 @@
from typing import List, Optional
from pydantic import BaseModel
from schemas import OpenAIChatMessage
import os
import requests
import json
from utils.main import (
get_last_user_message,
add_or_update_system_message,
get_function_specs,
)
from typing import Literal
class Pipeline:
# NOTE(review): this block is the content of a file DELETED by this commit, as
# rendered in the diff view; original indentation was lost in the rendering.
# It appears to be superseded by the blueprint-based tools pipeline added in
# the same commit — TODO confirm against the repository history.
def __init__(self):
# Pipeline filters are only compatible with Open WebUI
# You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API.
self.type = "filter"
# Assign a unique identifier to the pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "function_calling_filter_pipeline"
self.name = "Function Calling Filter"
# Valves: configuration — target pipelines, priority, OpenAI task-model
# settings, the context-injection TEMPLATE, and the OpenWeatherMap key.
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Valves for function calling
OPENAI_API_BASE_URL: str
OPENAI_API_KEY: str
TASK_MODEL: str
TEMPLATE: str
OPENWEATHERMAP_API_KEY: str = ""
# Initialize valves
self.valves = Valves(
**{
"pipelines": ["*"], # Connect to all pipelines
"OPENAI_API_BASE_URL": "https://api.openai.com/v1",
"OPENAI_API_KEY": os.getenv("OPENAI_API_KEY", "YOUR_OPENAI_API_KEY"),
"TASK_MODEL": "gpt-3.5-turbo",
"TEMPLATE": """Use the following context as your learned knowledge, inside <context></context> XML tags.
<context>
{{CONTEXT}}
</context>
When answer to user:
- If you don't know, just say that you don't know.
- If you don't know when you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
And answer according to the language of the user's question.""",
}
)
# Functions: the callable tools the task model may select in inlet().
class Functions:
def __init__(self, pipeline) -> None:
self.pipeline = pipeline
def get_current_weather(
self,
location: str,
unit: Literal["metric", "fahrenheit"] = "fahrenheit",
) -> str:
"""
Get the current weather for a location. If the location is not found, return an empty string.
:param location: The location to get the weather for.
:param unit: The unit to get the weather in. Default is fahrenheit.
:return: The current weather for the location.
"""
# https://openweathermap.org/api
if self.pipeline.valves.OPENWEATHERMAP_API_KEY == "":
return "OpenWeatherMap API Key not set, ask the user to set it up."
else:
units = "imperial" if unit == "fahrenheit" else "metric"
params = {
"q": location,
"appid": self.pipeline.valves.OPENWEATHERMAP_API_KEY,
"units": units,
}
response = requests.get(
"http://api.openweathermap.org/data/2.5/weather", params=params
)
response.raise_for_status() # Raises an HTTPError for bad responses
data = response.json()
weather_description = data["weather"][0]["description"]
temperature = data["main"]["temp"]
return f"{location}: {weather_description.capitalize()}, {temperature}°{unit.capitalize()[0]}"
def calculator(self, equation: str) -> str:
"""
Calculate the result of an equation.
:param equation: The equation to calculate.
"""
# Avoid using eval in production code
# https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html
# NOTE(review): eval() on model-chosen input is a code-execution risk.
try:
result = eval(equation)
return f"{equation} = {result}"
except Exception as e:
print(e)
return "Invalid equation"
self.functions = Functions(self)
async def on_startup(self):
# This function is called when the server is started.
print(f"on_startup:{__name__}")
pass
async def on_shutdown(self):
# This function is called when the server is stopped.
print(f"on_shutdown:{__name__}")
pass
# inlet: asks TASK_MODEL to pick one of self.functions for the user query,
# runs it, and injects the result into the system prompt via TEMPLATE.
async def inlet(self, body: dict, user: Optional[dict] = None) -> dict:
# If title generation is requested, skip the function calling filter
if body.get("title", False):
return body
print(f"pipe:{__name__}")
print(user)
# Get the last user message
user_message = get_last_user_message(body["messages"])
# Get the function specs
function_specs = get_function_specs(self.functions)
# System prompt for function calling
fc_system_prompt = (
f"Functions: {json.dumps(function_specs, indent=2)}"
+ """
If a function doesn't match the query, return an empty string. Else, pick a function, fill in the parameters from the function's schema, and return it in the format { "name": \"functionName\", "parameters": { "key": "value" } }. Only pick a function if the user asks. Only return the object. Do not return any other text."
"""
)
r = None
try:
# Call the OpenAI API to get the function response
r = requests.post(
url=f"{self.valves.OPENAI_API_BASE_URL}/chat/completions",
json={
"model": self.valves.TASK_MODEL,
"messages": [
{
"role": "system",
"content": fc_system_prompt,
},
{
"role": "user",
"content": "History:\n"
+ "\n".join(
[
f"{message['role']}: {message['content']}"
for message in body["messages"][::-1][:4]
]
)
+ f"Query: {user_message}",
},
],
# TODO: dynamically add response_format?
# "response_format": {"type": "json_object"},
},
headers={
"Authorization": f"Bearer {self.valves.OPENAI_API_KEY}",
"Content-Type": "application/json",
},
stream=False,
)
r.raise_for_status()
response = r.json()
content = response["choices"][0]["message"]["content"]
# Parse the function response
if content != "":
result = json.loads(content)
print(result)
# Call the function
if "name" in result:
function = getattr(self.functions, result["name"])
function_result = None
try:
function_result = function(**result["parameters"])
except Exception as e:
print(e)
# Add the function result to the system prompt
if function_result:
system_prompt = self.valves.TEMPLATE.replace(
"{{CONTEXT}}", function_result
)
print(system_prompt)
messages = add_or_update_system_message(
system_prompt, body["messages"]
)
# Return the updated messages
return {**body, "messages": messages}
except Exception as e:
print(f"Error: {e}")
if r:
try:
print(r.json())
# NOTE(review): bare except — silently swallows JSON decode errors
# of the (already failed) response body.
except:
pass
return body

View File

@@ -1,102 +0,0 @@
"""
title: Langfuse Filter Pipeline
author: open-webui
date: 2024-05-30
version: 1.0
license: MIT
description: A filter pipeline that uses Langfuse.
requirements: langfuse
"""
from typing import List, Optional
from schemas import OpenAIChatMessage
import os
from pydantic import BaseModel
from langfuse import Langfuse
class Pipeline:
# NOTE(review): this block is the content of a file DELETED by this commit
# (Langfuse filter v1.0), as rendered in the diff view; original indentation
# was lost. The same commit adds a v1.1 replacement with env-var defaults,
# generation tracking and an outlet handler.
def __init__(self):
# Pipeline filters are only compatible with Open WebUI
# You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API.
self.type = "filter"
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "langfuse_filter_pipeline"
self.name = "Langfuse Filter"
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
# e.g. ["llama3:latest", "gpt-3.5-turbo"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Valves
secret_key: str
public_key: str
host: str
# Initialize
# NOTE(review): no defaults on the getenv calls below — unset env vars pass
# None into required str fields, which pydantic rejects at construction.
self.valves = Valves(
**{
"pipelines": ["*"], # Connect to all pipelines
"secret_key": os.getenv("LANGFUSE_SECRET_KEY"),
"public_key": os.getenv("LANGFUSE_PUBLIC_KEY"),
"host": os.getenv("LANGFUSE_HOST", "https://cloud.langfuse.com"),
}
)
self.langfuse = None
pass
async def on_startup(self):
# This function is called when the server is started.
print(f"on_startup:{__name__}")
self.set_langfuse()
pass
async def on_shutdown(self):
# This function is called when the server is stopped.
print(f"on_shutdown:{__name__}")
# NOTE(review): raises AttributeError if set_langfuse() never ran
# (self.langfuse is still None at this point).
self.langfuse.flush()
pass
async def on_valves_updated(self):
# This function is called when the valves are updated.
self.set_langfuse()
pass
def set_langfuse(self):
# (Re)build the Langfuse client from the current valves and verify auth.
self.langfuse = Langfuse(
secret_key=self.valves.secret_key,
public_key=self.valves.public_key,
host=self.valves.host,
debug=False,
)
self.langfuse.auth_check()
async def inlet(self, body: dict, user: Optional[dict] = None) -> dict:
# Opens a Langfuse trace per request; body is returned unchanged.
# NOTE(review): user["id"] crashes when user is None despite the Optional hint.
print(f"inlet:{__name__}")
trace = self.langfuse.trace(
name=f"filter:{__name__}",
input=body,
user_id=user["id"],
metadata={"name": user["name"]},
session_id=body["chat_id"]
)
print(trace.get_trace_url())
return body

View File

@@ -9,11 +9,10 @@ from subprocess import call
class Pipeline:
def __init__(self):
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "applescript_pipeline"
# self.id = "applescript_pipeline"
self.name = "AppleScript Pipeline"
pass

View File

@@ -6,7 +6,10 @@ import subprocess
class Pipeline:
def __init__(self):
# Optionally, you can set the id and name of the pipeline.
self.id = "python_code_pipeline"
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
# self.id = "python_code_pipeline"
self.name = "Python Code Pipeline"
pass

View File

@@ -6,18 +6,19 @@ import os
class Pipeline:
class Valves(BaseModel):
pass
def __init__(self):
# Assign a unique identifier to the pipeline.
# Optionally, you can set the id and name of the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "wiki_pipeline"
# self.id = "wiki_pipeline"
self.name = "Wikipedia Pipeline"
class Valves(BaseModel):
pass
# Initialize rate limits
self.valves = Valves(**{"OPENAI_API_KEY": os.getenv("OPENAI_API_KEY", "")})
self.valves = self.Valves(**{"OPENAI_API_KEY": os.getenv("OPENAI_API_KEY", "")})
async def on_startup(self):
# This function is called when the server is started.

View File

@@ -19,15 +19,17 @@ import requests
class Pipeline:
class Valves(BaseModel):
ANTHROPIC_API_KEY: str = ""
def __init__(self):
self.type = "manifold"
self.id = "anthropic"
self.name = "anthropic/"
class Valves(BaseModel):
ANTHROPIC_API_KEY: str
self.valves = Valves(**{"ANTHROPIC_API_KEY": os.getenv("ANTHROPIC_API_KEY")})
self.valves = self.Valves(
**{"ANTHROPIC_API_KEY": os.getenv("ANTHROPIC_API_KEY", "your-api-key-here")}
)
self.client = Anthropic(api_key=self.valves.ANTHROPIC_API_KEY)
def get_anthropic_models(self):
@@ -61,6 +63,13 @@ class Pipeline:
self, user_message: str, model_id: str, messages: List[dict], body: dict
) -> Union[str, Generator, Iterator]:
try:
if "user" in body:
del body["user"]
if "chat_id" in body:
del body["chat_id"]
if "title" in body:
del body["title"]
if body.get("stream", False):
return self.stream_response(model_id, messages, body)
else:

View File

@@ -1,16 +1,27 @@
from typing import List, Union, Generator, Iterator
from schemas import OpenAIChatMessage
from pydantic import BaseModel
import requests
class Pipeline:
class Valves(BaseModel):
# You can add your custom valves here.
AZURE_OPENAI_API_KEY: str = "your-azure-openai-api-key-here"
AZURE_OPENAI_ENDPOINT: str = "your-azure-openai-endpoint-here"
DEPLOYMENT_NAME: str = "your-deployment-name-here"
API_VERSION: str = "2023-10-01-preview"
MODEL: str = "gpt-3.5-turbo"
pass
def __init__(self):
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "azure_openai_pipeline"
# self.id = "azure_openai_pipeline"
self.name = "Azure OpenAI Pipeline"
self.valves = self.Valves()
pass
async def on_startup(self):
@@ -32,25 +43,22 @@ class Pipeline:
print(messages)
print(user_message)
AZURE_OPENAI_API_KEY = "your-azure-openai-api-key-here"
AZURE_OPENAI_ENDPOINT = "your-azure-openai-endpoint-here"
DEPLOYMENT_NAME = "your-deployment-name-here"
MODEL = "gpt-3.5-turbo"
headers = {
"api-key": self.valves.AZURE_OPENAI_API_KEY,
"Content-Type": "application/json",
}
headers = {"api-key": AZURE_OPENAI_API_KEY, "Content-Type": "application/json"}
url = f"{AZURE_OPENAI_ENDPOINT}/openai/deployments/{DEPLOYMENT_NAME}/chat/completions?api-version=2023-10-01-preview"
url = f"{self.valves.AZURE_OPENAI_ENDPOINT}/openai/deployments/{self.valves.DEPLOYMENT_NAME}/chat/completions?api-version={self.valves.API_VERSION}"
try:
r = requests.post(
url=url,
json={**body, "model": MODEL},
json={**body, "model": self.valves.MODEL},
headers=headers,
stream=True,
)
r.raise_for_status()
if body["stream"]:
return r.iter_lines()
else:

View File

@@ -18,16 +18,25 @@ import requests
class Pipeline:
class Valves(BaseModel):
COHERE_API_BASE_URL: str = "https://api.cohere.com/v1"
COHERE_API_KEY: str = ""
def __init__(self):
self.type = "manifold"
# Optionally, you can set the id and name of the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "cohere"
self.name = "cohere/"
class Valves(BaseModel):
COHERE_API_BASE_URL: str = "https://api.cohere.com/v1"
COHERE_API_KEY: str
self.valves = Valves(**{"COHERE_API_KEY": os.getenv("COHERE_API_KEY")})
self.valves = self.Valves(
**{"COHERE_API_KEY": os.getenv("COHERE_API_KEY", "your-api-key-here")}
)
self.pipelines = self.get_cohere_models()

View File

@@ -0,0 +1,122 @@
from typing import List, Union, Generator, Iterator
from schemas import OpenAIChatMessage
from pydantic import BaseModel
import os
import requests
class Pipeline:
    """Manifold pipeline that exposes Groq's OpenAI-compatible chat models."""

    class Valves(BaseModel):
        # Base URL of Groq's OpenAI-compatible API.
        GROQ_API_BASE_URL: str = "https://api.groq.com/openai/v1"
        # API key used for both model discovery and chat completions.
        GROQ_API_KEY: str = ""

    def __init__(self):
        self.type = "manifold"

        # Optionally, you can set the id and name of the pipeline.
        # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
        # The identifier must be unique across all pipelines.
        # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
        self.id = "groq"
        self.name = "Groq: "

        self.valves = self.Valves(
            **{"GROQ_API_KEY": os.getenv("GROQ_API_KEY", "your-groq-api-key-here")}
        )
        self.pipelines = self.get_models()

    async def on_startup(self):
        # This function is called when the server is started.
        print(f"on_startup:{__name__}")

    async def on_shutdown(self):
        # This function is called when the server is stopped.
        print(f"on_shutdown:{__name__}")

    async def on_valves_updated(self):
        # Refresh the model list whenever the valves change (e.g. a new API key).
        print(f"on_valves_updated:{__name__}")
        self.pipelines = self.get_models()

    def get_models(self):
        """Fetch the available models from Groq.

        Returns a list of ``{"id", "name"}`` dicts on success, a single
        error placeholder entry when the request fails, or an empty list
        when no API key is configured.
        """
        if not self.valves.GROQ_API_KEY:
            return []
        try:
            headers = {
                "Authorization": f"Bearer {self.valves.GROQ_API_KEY}",
                "Content-Type": "application/json",
            }
            r = requests.get(
                f"{self.valves.GROQ_API_BASE_URL}/models", headers=headers
            )
            # Surface HTTP errors (e.g. 401) instead of a KeyError on "data".
            r.raise_for_status()
            models = r.json()
            return [
                {"id": model["id"], "name": model.get("name", model["id"])}
                for model in models["data"]
            ]
        except Exception as e:
            print(f"Error: {e}")
            return [
                {
                    "id": "error",
                    "name": "Could not fetch models from Groq, please update the API Key in the valves.",
                },
            ]

    def pipe(
        self, user_message: str, model_id: str, messages: List[dict], body: dict
    ) -> Union[str, Generator, Iterator]:
        """Forward a chat completion request to Groq.

        Returns an iterator over raw response lines when streaming is
        requested, the parsed JSON response otherwise, or an error string
        if the request fails.
        """
        print(f"pipe:{__name__}")
        print(messages)
        print(user_message)

        headers = {
            "Authorization": f"Bearer {self.valves.GROQ_API_KEY}",
            "Content-Type": "application/json",
        }

        payload = {**body, "model": model_id}
        # Strip Open WebUI bookkeeping fields before forwarding upstream.
        for key in ("user", "chat_id", "title"):
            payload.pop(key, None)
        print(payload)

        try:
            r = requests.post(
                url=f"{self.valves.GROQ_API_BASE_URL}/chat/completions",
                json=payload,
                headers=headers,
                stream=True,
            )
            r.raise_for_status()
            # Use .get so a payload without "stream" does not raise KeyError.
            if body.get("stream"):
                return r.iter_lines()
            return r.json()
        except Exception as e:
            return f"Error: {e}"

View File

@@ -11,9 +11,16 @@ from typing import List, Union, Generator, Iterator
from schemas import OpenAIChatMessage
from pydantic import BaseModel
import requests
import os
class Pipeline:
class Valves(BaseModel):
LITELLM_BASE_URL: str = ""
LITELLM_API_KEY: str = ""
LITELLM_PIPELINE_DEBUG: bool = False
def __init__(self):
# You can also set the pipelines that are available in this pipeline.
# Set manifold to True if you want to use this pipeline as a manifold.
@@ -21,19 +28,24 @@ class Pipeline:
self.type = "manifold"
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "litellm_manifold"
# self.id = "litellm_manifold"
# Optionally, you can set the name of the manifold pipeline.
self.name = "LiteLLM: "
class Valves(BaseModel):
LITELLM_BASE_URL: str
# Initialize rate limits
self.valves = Valves(**{"LITELLM_BASE_URL": "http://localhost:4001"})
self.valves = self.Valves(
**{
"LITELLM_BASE_URL": os.getenv(
"LITELLM_BASE_URL", "http://localhost:4001"
),
"LITELLM_API_KEY": os.getenv("LITELLM_API_KEY", "your-api-key-here"),
"LITELLM_PIPELINE_DEBUG": os.getenv("LITELLM_PIPELINE_DEBUG", False),
}
)
self.pipelines = []
pass
@@ -54,9 +66,16 @@ class Pipeline:
pass
def get_litellm_models(self):
headers = {}
if self.valves.LITELLM_API_KEY:
headers["Authorization"] = f"Bearer {self.valves.LITELLM_API_KEY}"
if self.valves.LITELLM_BASE_URL:
try:
r = requests.get(f"{self.valves.LITELLM_BASE_URL}/v1/models")
r = requests.get(
f"{self.valves.LITELLM_BASE_URL}/v1/models", headers=headers
)
models = r.json()
return [
{
@@ -69,7 +88,7 @@ class Pipeline:
print(f"Error: {e}")
return [
{
"id": self.id,
"id": "error",
"name": "Could not fetch models from LiteLLM, please update the URL in the valves.",
},
]
@@ -85,10 +104,20 @@ class Pipeline:
print(f"# Message: {user_message}")
print("######################################")
headers = {}
if self.valves.LITELLM_API_KEY:
headers["Authorization"] = f"Bearer {self.valves.LITELLM_API_KEY}"
try:
payload = {**body, "model": model_id, "user": body["user"]["id"]}
payload.pop("chat_id", None)
payload.pop("user", None)
payload.pop("title", None)
r = requests.post(
url=f"{self.valves.LITELLM_BASE_URL}/v1/chat/completions",
json={**body, "model": model_id, "user_id": body["user"]["id"]},
json=payload,
headers=headers,
stream=True,
)

View File

@@ -21,6 +21,12 @@ import yaml
class Pipeline:
class Valves(BaseModel):
LITELLM_CONFIG_DIR: str = "./litellm/config.yaml"
LITELLM_PROXY_PORT: int = 4001
LITELLM_PROXY_HOST: str = "127.0.0.1"
litellm_config: dict = {}
def __init__(self):
# You can also set the pipelines that are available in this pipeline.
# Set manifold to True if you want to use this pipeline as a manifold.
@@ -28,22 +34,16 @@ class Pipeline:
self.type = "manifold"
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "litellm_subprocess_manifold"
# self.id = "litellm_subprocess_manifold"
# Optionally, you can set the name of the manifold pipeline.
self.name = "LiteLLM: "
class Valves(BaseModel):
LITELLM_CONFIG_DIR: str = "./litellm/config.yaml"
LITELLM_PROXY_PORT: int = 4001
LITELLM_PROXY_HOST: str = "127.0.0.1"
litellm_config: dict = {}
# Initialize Valves
self.valves = Valves(**{"LITELLM_CONFIG_DIR": f"./litellm/config.yaml"})
self.valves = self.Valves(**{"LITELLM_CONFIG_DIR": f"./litellm/config.yaml"})
self.background_process = None
pass
@@ -173,7 +173,7 @@ class Pipeline:
print(f"Error: {e}")
return [
{
"id": self.id,
"id": "error",
"name": "Could not fetch models from LiteLLM, please update the URL in the valves.",
},
]
@@ -197,7 +197,7 @@ class Pipeline:
try:
r = requests.post(
url=f"http://{self.valves.LITELLM_PROXY_HOST}:{self.valves.LITELLM_PROXY_PORT}/v1/chat/completions",
json={**body, "model": model_id, "user_id": body["user"]["id"]},
json={**body, "model": model_id, "user": body["user"]["id"]},
stream=True,
)

View File

@@ -15,10 +15,10 @@ from schemas import OpenAIChatMessage
class Pipeline:
def __init__(self):
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "llama_cpp_pipeline"
# self.id = "llama_cpp_pipeline"
self.name = "Llama C++ Pipeline"
self.llm = None

View File

@@ -21,10 +21,10 @@ from huggingface_hub import login
class Pipeline:
def __init__(self):
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "mlx_pipeline"
# self.id = "mlx_pipeline"
self.name = "MLX Pipeline"
self.host = os.getenv("MLX_HOST", "localhost")
self.port = os.getenv("MLX_PORT", "8080")

View File

@@ -5,6 +5,10 @@ import requests
class Pipeline:
class Valves(BaseModel):
OLLAMA_BASE_URL: str
def __init__(self):
# You can also set the pipelines that are available in this pipeline.
# Set manifold to True if you want to use this pipeline as a manifold.
@@ -12,18 +16,15 @@ class Pipeline:
self.type = "manifold"
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "ollama_manifold"
# self.id = "ollama_manifold"
# Optionally, you can set the name of the manifold pipeline.
self.name = "Ollama: "
class Valves(BaseModel):
OLLAMA_BASE_URL: str
self.valves = Valves(**{"OLLAMA_BASE_URL": "http://localhost:11435"})
self.valves = self.Valves(**{"OLLAMA_BASE_URL": "http://localhost:11435"})
self.pipelines = []
pass
@@ -57,7 +58,7 @@ class Pipeline:
print(f"Error: {e}")
return [
{
"id": self.id,
"id": "error",
"name": "Could not fetch models from Ollama, please update the URL in the valves.",
},
]

View File

@@ -6,10 +6,10 @@ import requests
class Pipeline:
def __init__(self):
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "ollama_pipeline"
# self.id = "ollama_pipeline"
self.name = "Ollama Pipeline"
pass

View File

@@ -0,0 +1,123 @@
from typing import List, Union, Generator, Iterator
from schemas import OpenAIChatMessage
from pydantic import BaseModel
import os
import requests
class Pipeline:
    """Manifold pipeline that exposes OpenAI's GPT chat models."""

    class Valves(BaseModel):
        # Base URL of the OpenAI API.
        OPENAI_API_BASE_URL: str = "https://api.openai.com/v1"
        # API key used for both model discovery and chat completions.
        OPENAI_API_KEY: str = ""

    def __init__(self):
        self.type = "manifold"

        # Optionally, you can set the id and name of the pipeline.
        # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
        # The identifier must be unique across all pipelines.
        # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
        # self.id = "openai_pipeline"
        self.name = "OpenAI: "

        self.valves = self.Valves(
            **{"OPENAI_API_KEY": os.getenv("OPENAI_API_KEY", "your-openai-api-key-here")}
        )
        self.pipelines = self.get_openai_models()

    async def on_startup(self):
        # This function is called when the server is started.
        print(f"on_startup:{__name__}")

    async def on_shutdown(self):
        # This function is called when the server is stopped.
        print(f"on_shutdown:{__name__}")

    async def on_valves_updated(self):
        # Refresh the model list whenever the valves change (e.g. a new API key).
        print(f"on_valves_updated:{__name__}")
        self.pipelines = self.get_openai_models()

    def get_openai_models(self):
        """Fetch the available GPT models from OpenAI.

        Returns a list of ``{"id", "name"}`` dicts (filtered to ids
        containing "gpt"), a single error placeholder entry when the
        request fails, or an empty list when no API key is configured.
        """
        if not self.valves.OPENAI_API_KEY:
            return []
        try:
            headers = {
                "Authorization": f"Bearer {self.valves.OPENAI_API_KEY}",
                "Content-Type": "application/json",
            }
            r = requests.get(
                f"{self.valves.OPENAI_API_BASE_URL}/models", headers=headers
            )
            # Surface HTTP errors (e.g. 401) instead of a KeyError on "data".
            r.raise_for_status()
            models = r.json()
            return [
                {"id": model["id"], "name": model.get("name", model["id"])}
                for model in models["data"]
                if "gpt" in model["id"]
            ]
        except Exception as e:
            print(f"Error: {e}")
            return [
                {
                    "id": "error",
                    "name": "Could not fetch models from OpenAI, please update the API Key in the valves.",
                },
            ]

    def pipe(
        self, user_message: str, model_id: str, messages: List[dict], body: dict
    ) -> Union[str, Generator, Iterator]:
        """Forward a chat completion request to OpenAI.

        Returns an iterator over raw response lines when streaming is
        requested, the parsed JSON response otherwise, or an error string
        if the request fails.
        """
        print(f"pipe:{__name__}")
        print(messages)
        print(user_message)

        headers = {
            "Authorization": f"Bearer {self.valves.OPENAI_API_KEY}",
            "Content-Type": "application/json",
        }

        payload = {**body, "model": model_id}
        # Strip Open WebUI bookkeeping fields before forwarding upstream.
        for key in ("user", "chat_id", "title"):
            payload.pop(key, None)
        print(payload)

        try:
            r = requests.post(
                url=f"{self.valves.OPENAI_API_BASE_URL}/chat/completions",
                json=payload,
                headers=headers,
                stream=True,
            )
            r.raise_for_status()
            # Use .get so a payload without "stream" does not raise KeyError.
            if body.get("stream"):
                return r.iter_lines()
            return r.json()
        except Exception as e:
            return f"Error: {e}"

View File

@@ -1,16 +1,29 @@
from typing import List, Union, Generator, Iterator
from schemas import OpenAIChatMessage
from pydantic import BaseModel
import os
import requests
class Pipeline:
class Valves(BaseModel):
OPENAI_API_KEY: str = ""
pass
def __init__(self):
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "openai_pipeline"
# self.id = "openai_pipeline"
self.name = "OpenAI Pipeline"
self.valves = self.Valves(
**{
"OPENAI_API_KEY": os.getenv(
"OPENAI_API_KEY", "your-openai-api-key-here"
)
}
)
pass
async def on_startup(self):
@@ -39,10 +52,21 @@ class Pipeline:
headers["Authorization"] = f"Bearer {OPENAI_API_KEY}"
headers["Content-Type"] = "application/json"
payload = {**body, "model": MODEL}
if "user" in payload:
del payload["user"]
if "chat_id" in payload:
del payload["chat_id"]
if "title" in payload:
del payload["title"]
print(payload)
try:
r = requests.post(
url="https://api.openai.com/v1/chat/completions",
json={**body, "model": MODEL},
json=payload,
headers=headers,
stream=True,
)

View File

@@ -1,16 +1,21 @@
from typing import List, Union, Generator, Iterator
from schemas import OpenAIChatMessage
from pydantic import BaseModel
class Pipeline:
class Valves(BaseModel):
pass
def __init__(self):
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "pipeline_example"
self.name = "Pipeline Example"
# self.id = "pipeline_example"
# The name of the pipeline.
self.name = "Pipeline Example"
pass
async def on_startup(self):
@@ -51,6 +56,10 @@ class Pipeline:
# This is where you can add your custom pipelines like RAG.
print(f"pipe:{__name__}")
# If you'd like to check for title generation, you can add the following check
if body.get("title", False):
print("Title Generation Request")
print(messages)
print(user_message)
print(body)

View File

@@ -14,32 +14,33 @@ from schemas import OpenAIChatMessage
class Pipeline:
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Add your custom parameters here
pass
def __init__(self):
# Pipeline filters are only compatible with Open WebUI
# You can think of filter pipeline as a middleware that can be used to edit the form data before it is sent to the OpenAI API.
self.type = "filter"
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "filter_pipeline"
# self.id = "filter_pipeline"
self.name = "Filter"
class Valves(BaseModel):
# List target pipeline ids (models) that this filter will be connected to.
# If you want to connect this filter to all pipelines, you can set pipelines to ["*"]
pipelines: List[str] = []
# Assign a priority level to the filter pipeline.
# The priority level determines the order in which the filter pipelines are executed.
# The lower the number, the higher the priority.
priority: int = 0
# Add your custom parameters here
pass
self.valves = Valves(**{"pipelines": ["llama3:latest"]})
self.valves = self.Valves(**{"pipelines": ["llama3:latest"]})
pass
@@ -57,6 +58,10 @@ class Pipeline:
# This filter is applied to the form data before it is sent to the OpenAI API.
print(f"inlet:{__name__}")
# If you'd like to check for title generation, you can add the following check
if body.get("title", False):
print("Title Generation Request")
print(body)
print(user)

View File

@@ -0,0 +1,33 @@
from blueprints.function_calling_blueprint import Pipeline as FunctionCallingBlueprint
class Pipeline(FunctionCallingBlueprint):
    """Function-calling pipeline scaffold: subclass the blueprint and
    register custom tools in the nested ``Tools`` class."""

    class Valves(FunctionCallingBlueprint.Valves):
        # Extend the blueprint's valves with custom parameters here.
        pass

    class Tools:
        def __init__(self, pipeline) -> None:
            # Keep a back-reference so tools can read the pipeline's valves.
            self.pipeline = pipeline

            # Add your custom tools using pure Python code here, make sure to add type hints
            # Use Sphinx-style docstrings to document your tools, they will be used for generating tools specifications
            # Please refer to function_calling_filter_pipeline.py for an example

    def __init__(self):
        super().__init__()

        # Optionally, you can set the id and name of the pipeline.
        # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
        # The identifier must be unique across all pipelines.
        # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
        # self.id = "my_tools_pipeline"
        self.name = "My Tools Pipeline"

        # Rebuild the valves from the blueprint defaults, overriding the
        # target pipelines so this filter connects everywhere.
        valve_config = self.valves.model_dump()
        valve_config["pipelines"] = ["*"]  # Connect to all pipelines
        self.valves = self.Valves(**valve_config)

        self.tools = self.Tools(self)

View File

@@ -10,13 +10,16 @@ class Pipeline:
self.type = "manifold"
# Optionally, you can set the id and name of the pipeline.
# Assign a unique identifier to the pipeline.
# Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
# The identifier must be unique across all pipelines.
# The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
self.id = "manifold_pipeline"
# self.id = "manifold_pipeline"
# Optionally, you can set the name of the manifold pipeline.
self.name = "Manifold: "
# Define pipelines that are available in this manifold pipeline.
# This is a list of dictionaries where each dictionary has an id and name.
self.pipelines = [
{
"id": "pipeline-1", # This will turn into `manifold_pipeline.pipeline-1`
@@ -45,6 +48,10 @@ class Pipeline:
# This is where you can add your custom pipelines like RAG.
print(f"pipe:{__name__}")
# If you'd like to check for title generation, you can add the following check
if body.get("title", False):
print("Title Generation Request")
print(messages)
print(user_message)
print(body)