From 34332092e6a0452aa52fd9389fa793cfcfbcae5c Mon Sep 17 00:00:00 2001 From: g453030291 <453030291@qq.com> Date: Sat, 20 Jul 2024 15:49:47 +0800 Subject: [PATCH 1/7] add aws bedrock claude pipeline example --- .../providers/aws_bedrock_claude_pipeline.py | 172 ++++++++++++++++++ 1 file changed, 172 insertions(+) create mode 100644 examples/pipelines/providers/aws_bedrock_claude_pipeline.py diff --git a/examples/pipelines/providers/aws_bedrock_claude_pipeline.py b/examples/pipelines/providers/aws_bedrock_claude_pipeline.py new file mode 100644 index 0000000..43442a1 --- /dev/null +++ b/examples/pipelines/providers/aws_bedrock_claude_pipeline.py @@ -0,0 +1,172 @@ +import base64 +import json +import logging +from io import BytesIO +from typing import List, Union, Generator, Iterator + +import boto3 + +from schemas import OpenAIChatMessage +from pydantic import BaseModel + +import os +import requests + +from utils.pipelines.main import pop_system_message + + +class Pipeline: + class Valves(BaseModel): + AWS_ACCESS_KEY: str = "" + AWS_SECRET_KEY: str = "" + AWS_REGION_NAME: str = "" + + def __init__(self): + self.type = "manifold" + # Optionally, you can set the id and name of the pipeline. + # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline. + # The identifier must be unique across all pipelines. + # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes. + # self.id = "openai_pipeline" + self.name = "Bedrock Claude: " + + self.valves = self.Valves( + **{ + "AWS_ACCESS_KEY": os.getenv("AWS_ACCESS_KEY", "your-aws-access-key-here"), + "AWS_SECRET_KEY": os.getenv("AWS_SECRET_KEY", "your-aws-secret-key-here"), + "AWS_REGION_NAME": os.getenv("AWS_REGION_NAME", "your-aws-region-name-here"), + } + ) + + self.bedrock = boto3.client(aws_access_key_id=self.valves.AWS_ACCESS_KEY, + aws_secret_access_key=self.valves.AWS_SECRET_KEY, + service_name="bedrock", + region_name=self.valves.AWS_REGION_NAME) + self.bedrock_runtime = boto3.client(aws_access_key_id=self.valves.AWS_ACCESS_KEY, + aws_secret_access_key=self.valves.AWS_SECRET_KEY, + service_name="bedrock-runtime", + region_name=self.valves.AWS_REGION_NAME) + + self.pipelines = self.get_models() + + + async def on_startup(self): + # This function is called when the server is started. + print(f"on_startup:{__name__}") + pass + + async def on_shutdown(self): + # This function is called when the server is stopped. + print(f"on_shutdown:{__name__}") + pass + + async def on_valves_updated(self): + # This function is called when the valves are updated. 
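+        # Rebuilding the boto3 clients below picks up credential or region
+        # changes from the valves without restarting the Pipelines server.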
+ print(f"on_valves_updated:{__name__}") + self.bedrock = boto3.client(aws_access_key_id=self.valves.AWS_ACCESS_KEY, + aws_secret_access_key=self.valves.AWS_SECRET_KEY, + service_name="bedrock", + region_name=self.valves.AWS_REGION_NAME) + self.bedrock_runtime = boto3.client(aws_access_key_id=self.valves.AWS_ACCESS_KEY, + aws_secret_access_key=self.valves.AWS_SECRET_KEY, + service_name="bedrock-runtime", + region_name=self.valves.AWS_REGION_NAME) + self.pipelines = self.get_models() + + def pipelines(self) -> List[dict]: + return self.get_models() + + def get_models(self): + if self.valves.AWS_ACCESS_KEY and self.valves.AWS_SECRET_KEY: + try: + response = self.bedrock.list_foundation_models(byProvider='Anthropic') + return [ + { + "id": model["modelId"], + "name": model["modelName"], + } + for model in response["modelSummaries"] + ] + except Exception as e: + print(f"Error: {e}") + return [ + { + "id": "error", + "name": "Could not fetch models from Bedrock, please update the Access/Secret Key in the valves.", + }, + ] + else: + return [] + + def pipe( + self, user_message: str, model_id: str, messages: List[dict], body: dict + ) -> Union[str, Generator, Iterator]: + # This is where you can add your custom pipelines like RAG. + print(f"pipe:{__name__}") + + system_message, messages = pop_system_message(messages) + + logging.info(f"pop_system_message: {json.dumps(messages)}") + + try: + processed_messages = [] + image_count = 0 + for message in messages: + processed_content = [] + if isinstance(message.get("content"), list): + for item in message["content"]: + if item["type"] == "text": + processed_content.append({"text": item["text"]}) + elif item["type"] == "image_url": + if image_count >= 20: + raise ValueError("Maximum of 20 images per API call exceeded") + processed_image = self.process_image(item["image_url"]) + processed_content.append(processed_image) + image_count += 1 + else: + processed_content = [{"text": message.get("content", "")}] + + processed_messages.append({"role": message["role"], "content": processed_content}) + + payload = {"modelId": model_id, + "messages": processed_messages, + "system": [{'text': system_message if system_message else 'you are an intelligent ai assistant'}], + "inferenceConfig": {"temperature": 0.5}, + "additionalModelRequestFields": {"top_k": 200} + } + if body.get("stream", False): + return self.stream_response(model_id, payload) + else: + return self.get_completion(model_id, payload) + except Exception as e: + return f"Error: {e}" + + def process_image(self, image: str): + img_stream = None + if image["url"].startswith("data:image"): + if ',' in image["url"]: + base64_string = image["url"].split(',')[1] + image_data = base64.b64decode(base64_string) + + img_stream = BytesIO(image_data) + else: + img_stream = requests.get(image["url"]).content + return { + "image": {"format": "png" if image["url"].endswith(".png") else "jpeg", + "source": {"bytes": img_stream.read()}} + } + + def stream_response(self, model_id: str, payload: dict) -> Generator: + if "system" in payload: + del payload["system"] + if "additionalModelRequestFields" in payload: + del payload["additionalModelRequestFields"] + streaming_response = self.bedrock_runtime.converse_stream(**payload) + for chunk in streaming_response["stream"]: + if "contentBlockDelta" in chunk: + yield chunk["contentBlockDelta"]["delta"]["text"] + + def get_completion(self, model_id: str, payload: dict) -> str: + response = self.bedrock_runtime.converse(**payload) + return 
From f45d666439f1df3f2d09a6cd965b01a50a6214ca Mon Sep 17 00:00:00 2001
From: "0xThresh.eth" <0xthresh@protonmail.com>
Date: Sun, 11 Aug 2024 22:36:51 -0600
Subject: [PATCH 2/7] Made env vars more generic, added Ollama port to dev-docker.sh

---
 dev-docker.sh                                 |  2 +-
 .../pipelines/rag/text_to_sql_pipeline.py     | 24 +++++++++----------
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/dev-docker.sh b/dev-docker.sh
index a502b05..c9d256a 100755
--- a/dev-docker.sh
+++ b/dev-docker.sh
@@ -6,4 +6,4 @@
 # Runs the containers with Ollama image for Open WebUI and the Pipelines endpoint in place
 docker run -d -p 9099:9099 --add-host=host.docker.internal:host-gateway -v pipelines:/app/pipelines --name pipelines --restart always --env-file .env ghcr.io/open-webui/pipelines:latest
-docker run -d -p 3000:8080 -v ~/.ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always -e OPENAI_API_BASE_URL=http://host.docker.internal:9099 -e OPENAI_API_KEY=0p3n-w3bu! ghcr.io/open-webui/open-webui:ollama
\ No newline at end of file
+docker run -d -p 3000:8080 -p 11434:11434 --add-host=host.docker.internal:host-gateway -v ~/.ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always -e OPENAI_API_BASE_URL=http://host.docker.internal:9099 -e OPENAI_API_KEY=0p3n-w3bu! -e OLLAMA_HOST=0.0.0.0 ghcr.io/open-webui/open-webui:ollama
\ No newline at end of file
diff --git a/examples/pipelines/rag/text_to_sql_pipeline.py b/examples/pipelines/rag/text_to_sql_pipeline.py
index 22f936f..31471ad 100644
--- a/examples/pipelines/rag/text_to_sql_pipeline.py
+++ b/examples/pipelines/rag/text_to_sql_pipeline.py
@@ -1,8 +1,8 @@
 """
 title: Llama Index DB Pipeline
 author: 0xThresh
-date: 2024-07-01
-version: 1.0
+date: 2024-08-11
+version: 1.1
 license: MIT
 description: A pipeline for using text-to-SQL for retrieving relevant information from a database using the Llama Index library.
 requirements: llama_index, sqlalchemy, psycopg2-binary
@@ -24,7 +24,7 @@ class Pipeline:
         DB_USER: str
         DB_PASSWORD: str
         DB_DATABASE: str
-        DB_TABLES: list[str]
+        DB_TABLE: str
         OLLAMA_HOST: str
         TEXT_TO_SQL_MODEL: str
 
@@ -39,14 +39,14 @@ class Pipeline:
         self.valves = self.Valves(
             **{
                 "pipelines": ["*"],                                      # Connect to all pipelines
-                "DB_HOST": os.getenv("PG_HOST", "http://localhost:5432"), # Database hostname
-                "DB_PORT": os.getenv("PG_PORT", 5432),                   # Database port
-                "DB_USER": os.getenv("PG_USER", "postgres"),             # User to connect to the database with
-                "DB_PASSWORD": os.getenv("PG_PASSWORD", "password"),     # Password to connect to the database with
-                "DB_DATABASE": os.getenv("PG_DB", "postgres"),           # Database to select on the DB instance
-                "DB_TABLES": ["albums"],                                 # Table(s) to run queries against
+                "DB_HOST": os.getenv("DB_HOST", "localhost"),            # Database hostname (no scheme prefix)
+                "DB_PORT": os.getenv("DB_PORT", 5432),                   # Database port
+                "DB_USER": os.getenv("DB_USER", "postgres"),             # User to connect to the database with
+                "DB_PASSWORD": os.getenv("DB_PASSWORD", "password"),     # Password to connect to the database with
+                "DB_DATABASE": os.getenv("DB_DATABASE", "postgres"),     # Database to select on the DB instance
+                "DB_TABLE": os.getenv("DB_TABLE", "table_name"),         # Table to run queries against
                 "OLLAMA_HOST": os.getenv("OLLAMA_HOST", "http://host.docker.internal:11434"), # Make sure to update with the URL of your Ollama host, such as http://localhost:11434 or remote server address
-                "TEXT_TO_SQL_MODEL": "phi3:latest"                       # Model to use for text-to-SQL generation
+                "TEXT_TO_SQL_MODEL": os.getenv("TEXT_TO_SQL_MODEL", "llama3.1:latest") # Model to use for text-to-SQL generation
             }
         )
 
@@ -69,7 +69,7 @@ class Pipeline:
         # Debug logging is required to see what SQL query is generated by the LlamaIndex library; enable on Pipelines server if needed
 
         # Create database reader for Postgres
-        sql_database = SQLDatabase(self.engine, include_tables=self.valves.DB_TABLES)
+        sql_database = SQLDatabase(self.engine, include_tables=[self.valves.DB_TABLE])
 
         # Set up LLM connection; uses phi3 model with 128k context limit since some queries have returned 20k+ tokens
         llm = Ollama(model=self.valves.TEXT_TO_SQL_MODEL, base_url=self.valves.OLLAMA_HOST, request_timeout=180.0, context_window=30000)
@@ -99,7 +99,7 @@ class Pipeline:
 
         query_engine = NLSQLTableQueryEngine(
             sql_database=sql_database,
-            tables=self.valves.DB_TABLES,
+            tables=[self.valves.DB_TABLE],
             llm=llm,
             embed_model="local",
             text_to_sql_prompt=text_to_sql_template,

From d86ce893fdaed988a9ea56a72729696a4abc20ce Mon Sep 17 00:00:00 2001
From: Jonas
Date: Thu, 15 Aug 2024 09:35:33 +0000
Subject: [PATCH 3/7] cloudflare ai initial draft

---
 .../providers/cloudflare_ai_pipeline.py       | 102 ++++++++++++++++++
 1 file changed, 102 insertions(+)
 create mode 100644 examples/pipelines/providers/cloudflare_ai_pipeline.py

diff --git a/examples/pipelines/providers/cloudflare_ai_pipeline.py b/examples/pipelines/providers/cloudflare_ai_pipeline.py
new file mode 100644
index 0000000..5559e17
--- /dev/null
+++ b/examples/pipelines/providers/cloudflare_ai_pipeline.py
@@ -0,0 +1,102 @@
+from typing import List, Union, Generator, Iterator
+from schemas import OpenAIChatMessage
+from pydantic import BaseModel
+
+import os
+import requests
+
+
+class Pipeline:
+    class Valves(BaseModel):
+        CLOUDFLARE_ACCOUNT_ID: str = ""
+        CLOUDFLARE_API_KEY: str = ""
+        CLOUDFLARE_MODELS: str = ""
+        pass
+
+    def __init__(self):
+        self.type = "manifold"
+        # Optionally, you can set the id and name of the pipeline.
+        # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
+        # The identifier must be unique across all pipelines.
+        # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
+        # self.id = "openai_pipeline"
+        self.name = "Cloudflare AI: "
+
+        self.valves = self.Valves(
+            **{
+                "CLOUDFLARE_API_KEY": os.getenv(
+                    "CLOUDFLARE_API_KEY", "your-openai-api-key-here"
+                ),
+                "CLOUDFLARE_MODELS": os.getenv(
+                    "CLOUDFLARE_MODELS",
+                    "@cf/meta/llama-3.1-8,@cf/deepseek-ai/deepseek-math-7b-instruct",
+                ),
+            },
+        )
+
+        self.pipelines = self.get_cloudflare_models()
+        pass
+
+    def get_cloudflare_models(self):
+        models = [
+            {"id": model, "name": model}
+            for model in self.valves.CLOUDFLARE_MODELS.split(",")
+        ]
+        return models
+
+    async def on_startup(self):
+        # This function is called when the server is started.
+        print(f"on_startup:{__name__}")
+        pass
+
+    async def on_shutdown(self):
+        # This function is called when the server is stopped.
+        print(f"on_shutdown:{__name__}")
+        pass
+
+    async def on_valves_updated(self):
+        # This function is called when the valves are updated.
+        print(f"on_valves_updated:{__name__}")
+        self.pipelines = self.get_cloudflare_models()
+        pass
+
+    def pipe(
+        self, user_message: str, model_id: str, messages: List[dict], body: dict
+    ) -> Union[str, Generator, Iterator]:
+        # This is where you can add your custom pipelines like RAG.
+        print(f"pipe:{__name__}")
+
+        print(messages)
+        print(user_message)
+
+        headers = {}
+        headers["Authorization"] = f"Bearer {self.valves.CLOUDFLARE_API_KEY}"
+        headers["Content-Type"] = "application/json"
+
+        payload = {**body, "model": model_id}
+
+        if "user" in payload:
+            del payload["user"]
+        if "chat_id" in payload:
+            del payload["chat_id"]
+        if "title" in payload:
+            del payload["title"]
+
+        print(payload)
+
+        try:
+            r = requests.post(
+                url=f"https://api.cloudflare.com/client/v4/accounts/{self.valves.CLOUDFLARE_ACCOUNT_ID}/ai/v1/chat/completions",
+                json=payload,
+                headers=headers,
+                stream=True,
+            )
+
+            r.raise_for_status()
+
+            if body["stream"]:
+                return r.iter_lines()
+            else:
+                return r.json()
+        except Exception as e:
+            return f"Error: {e}"

From c50d4eb8f8a76fff81b6243e1c3bfb458c397b90 Mon Sep 17 00:00:00 2001
From: Jonas
Date: Fri, 16 Aug 2024 04:04:25 +0000
Subject: [PATCH 4/7] cloudflare ai pipeline

---
 .../providers/cloudflare_ai_pipeline.py       | 43 +++++--------
 1 file changed, 12 insertions(+), 31 deletions(-)

diff --git a/examples/pipelines/providers/cloudflare_ai_pipeline.py b/examples/pipelines/providers/cloudflare_ai_pipeline.py
index 5559e17..3bbcadc 100644
--- a/examples/pipelines/providers/cloudflare_ai_pipeline.py
+++ b/examples/pipelines/providers/cloudflare_ai_pipeline.py
@@ -1,7 +1,6 @@
 from typing import List, Union, Generator, Iterator
 from schemas import OpenAIChatMessage
 from pydantic import BaseModel
-
 import os
 import requests
 
@@ -10,40 +9,33 @@ class Pipeline:
     class Valves(BaseModel):
         CLOUDFLARE_ACCOUNT_ID: str = ""
         CLOUDFLARE_API_KEY: str = ""
-        CLOUDFLARE_MODELS: str = ""
+        CLOUDFLARE_MODEL: str = ""
         pass
 
     def __init__(self):
-        self.type = "manifold"
         # Optionally, you can set the id and name of the pipeline.
         # Best practice is to not specify the id so that it can be automatically inferred from the filename, so that users can install multiple versions of the same pipeline.
         # The identifier must be unique across all pipelines.
         # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
         # self.id = "openai_pipeline"
-        self.name = "Cloudflare AI: "
-
+        self.name = "Cloudflare AI"
         self.valves = self.Valves(
             **{
+                "CLOUDFLARE_ACCOUNT_ID": os.getenv(
+                    "CLOUDFLARE_ACCOUNT_ID",
+                    "your-account-id",
+                ),
                 "CLOUDFLARE_API_KEY": os.getenv(
-                    "CLOUDFLARE_API_KEY", "your-openai-api-key-here"
+                    "CLOUDFLARE_API_KEY", "your-cloudflare-api-key"
                 ),
-                "CLOUDFLARE_MODELS": os.getenv(
-                    "CLOUDFLARE_MODELS",
-                    "@cf/meta/llama-3.1-8,@cf/deepseek-ai/deepseek-math-7b-instruct",
+                "CLOUDFLARE_MODEL": os.getenv(
+                    "CLOUDFLARE_MODEL",
+                    "@cf/meta/llama-3.1-8b-instruct",
                 ),
-            },
+            }
         )
-
-        self.pipelines = self.get_cloudflare_models()
         pass
 
-    def get_cloudflare_models(self):
-        models = [
-            {"id": model, "name": model}
-            for model in self.valves.CLOUDFLARE_MODELS.split(",")
-        ]
-        return models
-
     async def on_startup(self):
         # This function is called when the server is started.
         print(f"on_startup:{__name__}")
@@ -54,26 +46,17 @@ class Pipeline:
         print(f"on_shutdown:{__name__}")
         pass
 
-    async def on_valves_updated(self):
-        # This function is called when the valves are updated.
-        print(f"on_valves_updated:{__name__}")
-        self.pipelines = self.get_cloudflare_models()
-        pass
-
     def pipe(
         self, user_message: str, model_id: str, messages: List[dict], body: dict
     ) -> Union[str, Generator, Iterator]:
        # This is where you can add your custom pipelines like RAG.
         print(f"pipe:{__name__}")
 
-        print(messages)
-        print(user_message)
-
         headers = {}
         headers["Authorization"] = f"Bearer {self.valves.CLOUDFLARE_API_KEY}"
         headers["Content-Type"] = "application/json"
 
-        payload = {**body, "model": model_id}
+        payload = {**body, "model": self.valves.CLOUDFLARE_MODEL}
 
         if "user" in payload:
             del payload["user"]
         if "chat_id" in payload:
             del payload["chat_id"]
         if "title" in payload:
             del payload["title"]
 
-        print(payload)
-
         try:
             r = requests.post(
                 url=f"https://api.cloudflare.com/client/v4/accounts/{self.valves.CLOUDFLARE_ACCOUNT_ID}/ai/v1/chat/completions",

From f962e6338dc3f283d76a671497a7070389ce1b3c Mon Sep 17 00:00:00 2001
From: g453030291 <453030291@qq.com>
Date: Sun, 18 Aug 2024 11:41:34 +0800
Subject: [PATCH 5/7] add title and some bugfix

---
 .../providers/aws_bedrock_claude_pipeline.py  | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

diff --git a/examples/pipelines/providers/aws_bedrock_claude_pipeline.py b/examples/pipelines/providers/aws_bedrock_claude_pipeline.py
index 43442a1..4990927 100644
--- a/examples/pipelines/providers/aws_bedrock_claude_pipeline.py
+++ b/examples/pipelines/providers/aws_bedrock_claude_pipeline.py
@@ -1,3 +1,13 @@
+"""
+title: AWS Bedrock Claude Pipeline
+author: G-mario
+date: 2024-08-18
+version: 1.0
+license: MIT
+description: A pipeline for generating text and processing images using the AWS Bedrock API (Anthropic Claude).
+requirements: requests, boto3
+environment_variables: AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION_NAME
+"""
 import base64
 import json
 import logging
@@ -6,7 +16,6 @@ from typing import List, Union, Generator, Iterator
 
 import boto3
 
-from schemas import OpenAIChatMessage
 from pydantic import BaseModel
 
 import os
 import requests
@@ -28,7 +37,7 @@ class Pipeline:
         # The identifier must be unique across all pipelines.
         # The identifier must be an alphanumeric string that can include underscores or hyphens. It cannot contain spaces, special characters, slashes, or backslashes.
# self.id = "openai_pipeline" - self.name = "Bedrock Claude: " + self.name = "Bedrock: " self.valves = self.Valves( **{ @@ -79,7 +88,7 @@ class Pipeline: def get_models(self): if self.valves.AWS_ACCESS_KEY and self.valves.AWS_SECRET_KEY: try: - response = self.bedrock.list_foundation_models(byProvider='Anthropic') + response = self.bedrock.list_foundation_models(byProvider='Anthropic', byInferenceType='ON_DEMAND') return [ { "id": model["modelId"], @@ -131,8 +140,8 @@ class Pipeline: payload = {"modelId": model_id, "messages": processed_messages, "system": [{'text': system_message if system_message else 'you are an intelligent ai assistant'}], - "inferenceConfig": {"temperature": 0.5}, - "additionalModelRequestFields": {"top_k": 200} + "inferenceConfig": {"temperature": body.get("temperature", 0.5)}, + "additionalModelRequestFields": {"top_k": body.get("top_k", 200), "top_p": body.get("top_p", 0.9)} } if body.get("stream", False): return self.stream_response(model_id, payload) From 030326319770b47809a76731d4476bd73c685950 Mon Sep 17 00:00:00 2001 From: kmishmael Date: Sun, 1 Sep 2024 18:52:09 +0300 Subject: [PATCH 6/7] fix: initialize request object, correct typo, and rename parameter - Initialize `r` to `None` to prevent potential `NameError`. - Correct typo: change "funcions" to "functions" in allowed parameters. - Rename "dataSources" to "data_sources" to align with API specs. --- examples/pipelines/providers/azure_openai_pipeline.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/examples/pipelines/providers/azure_openai_pipeline.py b/examples/pipelines/providers/azure_openai_pipeline.py index 39146e0..bb4e6e7 100644 --- a/examples/pipelines/providers/azure_openai_pipeline.py +++ b/examples/pipelines/providers/azure_openai_pipeline.py @@ -56,8 +56,8 @@ class Pipeline: url = f"{self.valves.AZURE_OPENAI_ENDPOINT}/openai/deployments/{self.valves.AZURE_OPENAI_DEPLOYMENT_NAME}/chat/completions?api-version={self.valves.AZURE_OPENAI_API_VERSION}" allowed_params = {'messages', 'temperature', 'role', 'content', 'contentPart', 'contentPartImage', - 'enhancements', 'dataSources', 'n', 'stream', 'stop', 'max_tokens', 'presence_penalty', - 'frequency_penalty', 'logit_bias', 'user', 'function_call', 'funcions', 'tools', + 'enhancements', 'data_sources', 'n', 'stream', 'stop', 'max_tokens', 'presence_penalty', + 'frequency_penalty', 'logit_bias', 'user', 'function_call', 'functions', 'tools', 'tool_choice', 'top_p', 'log_probs', 'top_logprobs', 'response_format', 'seed'} # remap user field if "user" in body and not isinstance(body["user"], str): @@ -67,6 +67,8 @@ class Pipeline: if len(body) != len(filtered_body): print(f"Dropped params: {', '.join(set(body.keys()) - set(filtered_body.keys()))}") + # Initialize the response variable to None. + r = None try: r = requests.post( url=url, From 2bb74b1e7805d560f3ee79573a6d25e7bde0f269 Mon Sep 17 00:00:00 2001 From: berjaoui Date: Mon, 2 Sep 2024 14:17:45 +0200 Subject: [PATCH 7/7] Update home_assistant_filter.py Corrected requirements for difflib (typo "difflab") --- examples/filters/home_assistant_filter.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/filters/home_assistant_filter.py b/examples/filters/home_assistant_filter.py index 6cf364a..a86ed9f 100644 --- a/examples/filters/home_assistant_filter.py +++ b/examples/filters/home_assistant_filter.py @@ -5,7 +5,7 @@ date: 2024-06-15 version: 1.0 license: MIT description: A pipeline for controlling Home Assistant entities based on their easy names. 
Only supports lights at the moment. -requirements: pytz, difflab +requirements: pytz, difflib """ import requests from typing import Literal, Dict, Any @@ -113,4 +113,4 @@ class Pipeline(FunctionCallingBlueprint): "pipelines": ["*"], # Connect to all pipelines }, ) - self.tools = self.Tools(self) \ No newline at end of file + self.tools = self.Tools(self)
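
Note that difflib ships with the Python standard library, so the pip requirement is likely unnecessary. The fuzzy "easy name" matching it enables can be done with difflib.get_close_matches; a minimal sketch, with a hypothetical name-to-entity-ID map standing in for the filter's Home Assistant state:

    from difflib import get_close_matches

    # Hypothetical mapping of easy names to Home Assistant entity IDs.
    ENTITIES = {
        "living room lamp": "light.living_room_lamp",
        "kitchen light": "light.kitchen",
        "desk lamp": "light.desk_lamp",
    }

    def resolve_entity(easy_name: str):
        """Return the entity ID whose easy name best matches the input, or None."""
        matches = get_close_matches(easy_name.lower(), ENTITIES, n=1, cutoff=0.6)
        return ENTITIES[matches[0]] if matches else None

    print(resolve_entity("Living Room Light"))  # -> light.living_room_lamp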