From 168ab964494f162e1b6efc403c46ee904a23e9c9 Mon Sep 17 00:00:00 2001
From: "Timothy J. Baek"
Date: Wed, 22 May 2024 10:33:16 -0700
Subject: [PATCH] feat: applescript pipeline example

---
 pipelines/examples/applescript_pipeline.py | 80 +++++++++++++++++++++
 pipelines/pipeline.py                      | 28 -------
 2 files changed, 80 insertions(+), 28 deletions(-)
 create mode 100644 pipelines/examples/applescript_pipeline.py
 delete mode 100644 pipelines/pipeline.py

diff --git a/pipelines/examples/applescript_pipeline.py b/pipelines/examples/applescript_pipeline.py
new file mode 100644
index 0000000..fe8286e
--- /dev/null
+++ b/pipelines/examples/applescript_pipeline.py
@@ -0,0 +1,80 @@
+from typing import List, Union, Generator, Iterator
+from schemas import OpenAIChatMessage
+import requests
+
+
+from subprocess import call
+
+
+class Pipeline:
+    def __init__(self):
+        # Optionally, you can set the id and name of the pipeline.
+        self.id = "applescript_pipeline"
+        self.name = "AppleScript Pipeline"
+
+    async def on_startup(self):
+        # This function is called when the server is started.
+        print(f"on_startup:{__name__}")
+
+    async def on_shutdown(self):
+        # This function is called when the server is stopped.
+        print(f"on_shutdown:{__name__}")
+
+    def get_response(
+        self, user_message: str, messages: List[OpenAIChatMessage], body: dict
+    ) -> Union[str, Generator, Iterator]:
+        # This is where you can add your custom pipelines like RAG.
+        print(f"get_response:{__name__}")
+
+        OLLAMA_BASE_URL = "http://localhost:11434"
+        MODEL = "llama3"
+
+        if body.get("title", False):
+            print("Title Generation")
+            return "AppleScript Pipeline"
+        else:
+            if "user" in body:
+                print("######################################")
+                print(f'# User: {body["user"]["name"]} ({body["user"]["id"]})')
+                print(f"# Message: {user_message}")
+                print("######################################")
+
+            commands = user_message.split(" ")
+
+            if commands[0] == "volume":
+                try:
+                    volume = int(commands[1])
+                    if 0 <= volume <= 100:
+                        # Run osascript directly; no shell is needed.
+                        call(["osascript", "-e", f"set volume output volume {volume}"])
+                except (IndexError, ValueError):
+                    # Ignore malformed commands such as "volume" or "volume loud".
+                    pass
+
+            payload = {
+                "model": MODEL,
+                "messages": [
+                    {
+                        "role": "system",
+                        "content": "You are an agent of the AppleScript Pipeline. You have the power to control the volume of the system.",
+                    },
+                    {"role": "user", "content": user_message},
+                ],
+                "stream": body["stream"],
+            }
+
+            try:
+                r = requests.post(
+                    url=f"{OLLAMA_BASE_URL}/v1/chat/completions",
+                    json=payload,
+                    stream=True,
+                )
+
+                r.raise_for_status()
+
+                if body["stream"]:
+                    return r.iter_lines()
+                else:
+                    return r.json()
+            except Exception as e:
+                return f"Error: {e}"
diff --git a/pipelines/pipeline.py b/pipelines/pipeline.py
deleted file mode 100644
index 0fda0ad..0000000
--- a/pipelines/pipeline.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from typing import List, Union, Generator
-from schemas import OpenAIChatMessage
-
-
-class Pipeline:
-    def __init__(self):
-        pass
-
-    async def on_startup(self):
-        # This function is called when the server is started.
-        print(f"on_startup:{__name__}")
-        pass
-
-    async def on_shutdown(self):
-        # This function is called when the server is stopped.
-        print(f"on_shutdown:{__name__}")
-        pass
-
-    def get_response(
-        self, user_message: str, messages: List[OpenAIChatMessage], body: dict
-    ) -> Union[str, Generator]:
-        # This is where you can add your custom pipelines like RAG.'
-        print(f"get_response:{__name__}")
-
-        print(messages)
-        print(user_message)
-
-        return f"{__name__} response to: {user_message}"
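
-- 
A minimal smoke test for the new pipeline, as a sketch rather than part of the
patch itself: it assumes the repository's pipelines/ directory is on PYTHONPATH
(so that `schemas` resolves, matching the file's own imports), that an Ollama
server is listening on localhost:11434 with the `llama3` model pulled, and that
it runs on macOS so `osascript` exists. Sending "volume 50" should set the
output volume to 50 and then return Ollama's chat completion.

    from examples.applescript_pipeline import Pipeline

    pipeline = Pipeline()

    # "volume 50" hits the osascript branch, then the message is forwarded to Ollama.
    response = pipeline.get_response(
        user_message="volume 50",
        messages=[],
        body={"stream": False},  # get_response reads body["stream"]
    )
    print(response)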