mirror of https://github.com/open-webui/pipelines (synced 2025-05-10 23:50:45 +00:00)
improved system prompt handling and added support for conversation history
commit a76c9fd3ad
parent c76d24b032
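
Note: the diff below imports pop_system_message from utils.pipelines.main; that helper's implementation is not part of this commit. A minimal sketch of the behavior the new code relies on (an assumption for illustration, not the upstream source):

    def pop_system_message(messages):
        """Split the first system message off a chat history.

        Returns (system_message, remaining_messages); system_message is
        None when the history contains no system entry.
        """
        for i, message in enumerate(messages):
            if message.get("role") == "system":
                return message, messages[:i] + messages[i + 1:]
        return None, messages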
@@ -3,6 +3,9 @@ from pydantic import BaseModel
 import os
 import requests
 
+from utils.pipelines.main import pop_system_message
+
+
 class Pipeline:
     class Valves(BaseModel):
         PERPLEXITY_API_BASE_URL: str = "https://api.perplexity.ai"
@@ -26,10 +29,14 @@
 
         # List of models
         self.pipelines = [
-            {"id": "llama-3-sonar-large-32k-online", "name": "Llama 3 Sonar Large 32K Online"},
-            {"id": "llama-3-sonar-small-32k-online", "name": "Llama 3 Sonar Small 32K Online"},
-            {"id": "llama-3-sonar-large-32k-chat", "name": "Llama 3 Sonar Large 32K Chat"},
-            {"id": "llama-3-sonar-small-32k-chat", "name": "Llama 3 Sonar Small 32K Chat"},
+            {"id": "llama-3-sonar-large-32k-online",
+                "name": "Llama 3 Sonar Large 32K Online"},
+            {"id": "llama-3-sonar-small-32k-online",
+                "name": "Llama 3 Sonar Small 32K Online"},
+            {"id": "llama-3-sonar-large-32k-chat",
+                "name": "Llama 3 Sonar Large 32K Chat"},
+            {"id": "llama-3-sonar-small-32k-chat",
+                "name": "Llama 3 Sonar Small 32K Chat"},
             {"id": "llama-3-8b-instruct", "name": "Llama 3 8B Instruct"},
             {"id": "llama-3-70b-instruct", "name": "Llama 3 70B Instruct"},
             {"id": "mixtral-8x7b-instruct", "name": "Mixtral 8x7B Instruct"},
@@ -59,6 +66,12 @@
         # This is where you can add your custom pipelines like RAG.
         print(f"pipe:{__name__}")
 
+        system_message, messages = pop_system_message(messages)
+        system_prompt = "You are a helpful assistant."
+        if system_message is not None:
+            system_prompt = system_message["content"]
+
+        print(system_prompt)
         print(messages)
         print(user_message)
 
@@ -71,8 +84,8 @@
         payload = {
             "model": model_id,
             "messages": [
-                {"role": "system", "content": "Be precise and concise."},
-                {"role": "user", "content": user_message}
+                {"role": "system", "content": system_prompt},
+                *messages
             ],
             "stream": body.get("stream", True),
             "return_citations": True,
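
Note: the hunk above swaps the hard-coded one-shot pair for the caller's system prompt followed by the full, system-stripped history, which is what enables multi-turn context. An illustration with placeholder values, mirroring the new construction:

    system_prompt = "You are a helpful assistant."  # default when no system message is supplied
    messages = [
        {"role": "user", "content": "What is the capital of France?"},
        {"role": "assistant", "content": "Paris."},
        {"role": "user", "content": "And its population?"},
    ]

    # One system entry first, then the whole remaining conversation in order.
    payload_messages = [{"role": "system", "content": system_prompt}, *messages]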
@@ -124,17 +137,21 @@
         except Exception as e:
             return f"Error: {e}"
 
 
 if __name__ == "__main__":
     import argparse
 
     parser = argparse.ArgumentParser(description="Perplexity API Client")
-    parser.add_argument("--api-key", type=str, required=True, help="API key for Perplexity")
-    parser.add_argument("--prompt", type=str, required=True, help="Prompt to send to the Perplexity API")
+    parser.add_argument("--api-key", type=str, required=True,
+                        help="API key for Perplexity")
+    parser.add_argument("--prompt", type=str, required=True,
+                        help="Prompt to send to the Perplexity API")
 
     args = parser.parse_args()
 
     pipeline = Pipeline()
     pipeline.valves.PERPLEXITY_API_KEY = args.api_key
-    response = pipeline.pipe(user_message=args.prompt, model_id="llama-3-sonar-large-32k-online", messages=[], body={"stream": False})
+    response = pipeline.pipe(
+        user_message=args.prompt, model_id="llama-3-sonar-large-32k-online", messages=[], body={"stream": False})
+
     print("Response:", response)
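
With this change a caller can pass prior turns through messages. A hedged usage sketch (placeholder key and content; in Open WebUI's pipe() convention, messages already includes the latest user turn and user_message is a convenience copy of it):

    pipeline = Pipeline()
    pipeline.valves.PERPLEXITY_API_KEY = "pplx-..."  # placeholder, not a real key

    # Prior turns now survive into the request payload via *messages.
    history = [
        {"role": "user", "content": "Who wrote Dune?"},
        {"role": "assistant", "content": "Frank Herbert."},
        {"role": "user", "content": "When was it first published?"},
    ]
    response = pipeline.pipe(
        user_message=history[-1]["content"],
        model_id="llama-3-sonar-large-32k-chat",
        messages=history,
        body={"stream": False},
    )
    print(response)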