From a76c9fd3ad3e1eece488073a5db0e630ed4c3d80 Mon Sep 17 00:00:00 2001
From: reddiedev <105657266+reddiedev@users.noreply.github.com>
Date: Thu, 1 Aug 2024 02:47:45 +0800
Subject: [PATCH] improved system prompt handling and added support for
 conversation history

---
 .../providers/perplexity_manifold_pipeline.py | 35 ++++++++++++++-----
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/examples/pipelines/providers/perplexity_manifold_pipeline.py b/examples/pipelines/providers/perplexity_manifold_pipeline.py
index a5d6c51..70b3e37 100644
--- a/examples/pipelines/providers/perplexity_manifold_pipeline.py
+++ b/examples/pipelines/providers/perplexity_manifold_pipeline.py
@@ -3,6 +3,9 @@ from pydantic import BaseModel
 import os
 import requests
 
+from utils.pipelines.main import pop_system_message
+
+
 class Pipeline:
     class Valves(BaseModel):
         PERPLEXITY_API_BASE_URL: str = "https://api.perplexity.ai"
@@ -26,10 +29,14 @@ class Pipeline:
 
         # List of models
         self.pipelines = [
-            {"id": "llama-3-sonar-large-32k-online", "name": "Llama 3 Sonar Large 32K Online"},
-            {"id": "llama-3-sonar-small-32k-online", "name": "Llama 3 Sonar Small 32K Online"},
-            {"id": "llama-3-sonar-large-32k-chat", "name": "Llama 3 Sonar Large 32K Chat"},
-            {"id": "llama-3-sonar-small-32k-chat", "name": "Llama 3 Sonar Small 32K Chat"},
+            {"id": "llama-3-sonar-large-32k-online",
+                "name": "Llama 3 Sonar Large 32K Online"},
+            {"id": "llama-3-sonar-small-32k-online",
+                "name": "Llama 3 Sonar Small 32K Online"},
+            {"id": "llama-3-sonar-large-32k-chat",
+                "name": "Llama 3 Sonar Large 32K Chat"},
+            {"id": "llama-3-sonar-small-32k-chat",
+                "name": "Llama 3 Sonar Small 32K Chat"},
             {"id": "llama-3-8b-instruct", "name": "Llama 3 8B Instruct"},
             {"id": "llama-3-70b-instruct", "name": "Llama 3 70B Instruct"},
             {"id": "mixtral-8x7b-instruct", "name": "Mixtral 8x7B Instruct"},
@@ -59,6 +66,12 @@ class Pipeline:
         # This is where you can add your custom pipelines like RAG.
         print(f"pipe:{__name__}")
 
+        system_message, messages = pop_system_message(messages)
+        system_prompt = "You are a helpful assistant."
+        if system_message is not None:
+            system_prompt = system_message["content"]
+
+        print(system_prompt)
         print(messages)
         print(user_message)
 
@@ -71,8 +84,8 @@ class Pipeline:
         payload = {
             "model": model_id,
             "messages": [
-                {"role": "system", "content": "Be precise and concise."},
-                {"role": "user", "content": user_message}
+                {"role": "system", "content": system_prompt},
+                *messages
             ],
             "stream": body.get("stream", True),
             "return_citations": True,
@@ -124,17 +137,21 @@ class Pipeline:
         except Exception as e:
             return f"Error: {e}"
 
+
 if __name__ == "__main__":
     import argparse
 
     parser = argparse.ArgumentParser(description="Perplexity API Client")
-    parser.add_argument("--api-key", type=str, required=True, help="API key for Perplexity")
-    parser.add_argument("--prompt", type=str, required=True, help="Prompt to send to the Perplexity API")
+    parser.add_argument("--api-key", type=str, required=True,
+                        help="API key for Perplexity")
+    parser.add_argument("--prompt", type=str, required=True,
+                        help="Prompt to send to the Perplexity API")
 
     args = parser.parse_args()
 
     pipeline = Pipeline()
     pipeline.valves.PERPLEXITY_API_KEY = args.api_key
-    response = pipeline.pipe(user_message=args.prompt, model_id="llama-3-sonar-large-32k-online", messages=[], body={"stream": False})
+    response = pipeline.pipe(
+        user_message=args.prompt, model_id="llama-3-sonar-large-32k-online", messages=[], body={"stream": False})
     print("Response:", response)
 
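
Reviewer note: the patch hinges on pop_system_message, imported from utils.pipelines.main. What follows is a minimal, hypothetical sketch of what that helper is assumed to do, inferred only from its call site in this patch (return the first "system" message, if any, together with the conversation with that entry removed); it is not the canonical implementation.

# Hypothetical sketch of pop_system_message (utils.pipelines.main),
# inferred from its call site in this patch; not the canonical code.
from typing import List, Optional, Tuple


def pop_system_message(
    messages: List[dict],
) -> Tuple[Optional[dict], List[dict]]:
    """Return (system_message_or_None, messages_without_that_entry)."""
    for i, message in enumerate(messages):
        if message.get("role") == "system":
            return message, messages[:i] + messages[i + 1:]
    return None, messages


# Usage mirroring the patched pipe(): pop the system turn, keep the rest
# of the history intact for forwarding to the API.
history = [
    {"role": "system", "content": "Answer in French."},
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Bonjour"},
    {"role": "user", "content": "How are you?"},
]
system_message, messages = pop_system_message(history)
assert system_message == {"role": "system", "content": "Answer in French."}
assert messages[0]["role"] == "user"  # multi-turn history preserved

Under that assumption, the change in pipe() does two things: splicing *messages into the payload forwards the full multi-turn history that the old code dropped (it previously sent only the latest user_message), and the "You are a helpful assistant." fallback ensures the request always carries a system turn even when the caller supplies none.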