Mirror of https://github.com/open-webui/pipelines (synced 2025-05-14 01:20:48 +00:00)

Merge pull request #124 from justinh-rahb/anthropic-0.1.2
Update Anthropic Manifold Pipeline to v0.1.2
Commit: 4907bd0692
@@ -2,9 +2,9 @@
 title: Anthropic Manifold Pipeline
 author: justinh-rahb
 date: 2024-06-20
-version: 1.1
+version: 1.2
 license: MIT
-description: A pipeline for generating text using the Anthropic API.
+description: A pipeline for generating text and processing images using the Anthropic API.
 requirements: requests, anthropic
 environment_variables: ANTHROPIC_API_KEY
 """
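The block above is the pipeline's docstring frontmatter, which the pipelines server reads to register the module, install its requirements, and surface its environment variables. As a minimal sketch, such `key: value` lines can be parsed as shown below; the helper name and parsing details are illustrative assumptions, not the actual loader used by open-webui/pipelines.

# Illustrative sketch only: parse "key: value" frontmatter lines like those in
# the hunk above. The real open-webui pipelines loader may work differently.
def parse_frontmatter(docstring: str) -> dict:
    meta = {}
    for line in docstring.strip().splitlines():
        key, sep, value = line.partition(":")
        if sep:
            meta[key.strip()] = value.strip()
    return meta

meta = parse_frontmatter(
    "title: Anthropic Manifold Pipeline\n"
    "version: 1.2\n"
    "requirements: requests, anthropic\n"
    "environment_variables: ANTHROPIC_API_KEY\n"
)
assert meta["version"] == "1.2"
assert meta["requirements"] == "requests, anthropic"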
@@ -35,13 +35,11 @@ class Pipeline:
         self.client = Anthropic(api_key=self.valves.ANTHROPIC_API_KEY)

     def get_anthropic_models(self):
-        # In the future, this could fetch models dynamically from Anthropic
         return [
             {"id": "claude-3-haiku-20240307", "name": "claude-3-haiku"},
             {"id": "claude-3-opus-20240229", "name": "claude-3-opus"},
             {"id": "claude-3-sonnet-20240229", "name": "claude-3-sonnet"},
             {"id": "claude-3-5-sonnet-20240620", "name": "claude-3.5-sonnet"},
-            # Add other Anthropic models here as they become available
         ]

     async def on_startup(self):
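The two deleted comments pointed toward fetching the model list dynamically instead of hard-coding it. A sketch of what that could look like follows; it assumes an anthropic SDK recent enough to expose client.models.list() (later releases do; the version installed alongside this pipeline may not), and falls back to a static list on any failure.

from anthropic import Anthropic

# Sketch under an assumption: models.list() exists only in newer anthropic SDK
# releases than this pipeline may have installed.
def list_anthropic_models(client: Anthropic, fallback: list) -> list:
    try:
        # client.models.list() auto-paginates over model objects with an `id`.
        return [{"id": m.id, "name": m.id} for m in client.models.list()]
    except Exception:
        # e.g., pass in the hard-coded list from get_anthropic_models().
        return fallback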
@@ -53,63 +51,94 @@ class Pipeline:
         pass

     async def on_valves_updated(self):
-        # This function is called when the valves are updated.
         self.client = Anthropic(api_key=self.valves.ANTHROPIC_API_KEY)
         pass

-    # Pipelines are the models that are available in the manifold.
-    # It can be a list or a function that returns a list.
     def pipelines(self) -> List[dict]:
         return self.get_anthropic_models()

+    def process_image(self, image_data):
+        if image_data["image_url"]["url"].startswith("data:image"):
+            mime_type, base64_data = image_data["image_url"]["url"].split(",", 1)
+            media_type = mime_type.split(":")[1].split(";")[0]
+            return {
+                "type": "image",
+                "source": {
+                    "type": "base64",
+                    "media_type": media_type,
+                    "data": base64_data,
+                },
+            }
+        else:
+            return {
+                "type": "image",
+                "source": {"type": "url", "url": image_data["image_url"]["url"]},
+            }
+
     def pipe(
         self, user_message: str, model_id: str, messages: List[dict], body: dict
     ) -> Union[str, Generator, Iterator]:
         try:
-            if "user" in body:
-                del body["user"]
-            if "chat_id" in body:
-                del body["chat_id"]
-            if "title" in body:
-                del body["title"]
+            # Remove unnecessary keys
+            for key in ['user', 'chat_id', 'title']:
+                body.pop(key, None)
+
+            system_message, messages = pop_system_message(messages)
+
+            processed_messages = []
+            image_count = 0
+            total_image_size = 0
+
+            for message in messages:
+                processed_content = []
+                if isinstance(message.get("content"), list):
+                    for item in message["content"]:
+                        if item["type"] == "text":
+                            processed_content.append({"type": "text", "text": item["text"]})
+                        elif item["type"] == "image_url":
+                            if image_count >= 5:
+                                raise ValueError("Maximum of 5 images per API call exceeded")
+
+                            processed_image = self.process_image(item)
+                            processed_content.append(processed_image)
+
+                            if processed_image["source"]["type"] == "base64":
+                                image_size = len(processed_image["source"]["data"]) * 3 / 4
+                            else:
+                                image_size = 0
+
+                            total_image_size += image_size
+                            if total_image_size > 100 * 1024 * 1024:
+                                raise ValueError("Total size of images exceeds 100 MB limit")
+
+                            image_count += 1
+                else:
+                    processed_content = [{"type": "text", "text": message.get("content", "")}]
+
+                processed_messages.append({"role": message["role"], "content": processed_content})
+
+            # Prepare the payload
+            payload = {
+                "model": model_id,
+                "messages": processed_messages,
+                "max_tokens": body.get("max_tokens", 4096),
+                "temperature": body.get("temperature", 0.8),
+                "top_k": body.get("top_k", 40),
+                "top_p": body.get("top_p", 0.9),
+                "stop_sequences": body.get("stop", []),
+                **({"system": str(system_message)} if system_message else {}),
+                "stream": body.get("stream", False),
+            }
+
             if body.get("stream", False):
-                return self.stream_response(model_id, messages, body)
+                return self.stream_response(model_id, payload)
             else:
-                return self.get_completion(model_id, messages, body)
+                return self.get_completion(model_id, payload)
         except (RateLimitError, APIStatusError, APIConnectionError) as e:
             return f"Error: {e}"

-    def stream_response(
-        self, model_id: str, messages: List[dict], body: dict
-    ) -> Generator:
-        system_message, messages = pop_system_message(messages)
-
-        max_tokens = (
-            body.get("max_tokens") if body.get("max_tokens") is not None else 4096
-        )
-        temperature = (
-            body.get("temperature") if body.get("temperature") is not None else 0.8
-        )
-        top_k = body.get("top_k") if body.get("top_k") is not None else 40
-        top_p = body.get("top_p") if body.get("top_p") is not None else 0.9
-        stop_sequences = body.get("stop") if body.get("stop") is not None else []
-
-        stream = self.client.messages.create(
-            **{
-                "model": model_id,
-                **(
-                    {"system": system_message} if system_message else {}
-                ),  # Add system message if it exists (optional
-                "messages": messages,
-                "max_tokens": max_tokens,
-                "temperature": temperature,
-                "top_k": top_k,
-                "top_p": top_p,
-                "stop_sequences": stop_sequences,
-                "stream": True,
-            }
-        )
+    def stream_response(self, model_id: str, payload: dict) -> Generator:
+        stream = self.client.messages.create(**payload)

         for chunk in stream:
             if chunk.type == "content_block_start":
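Two details of the new code are worth unpacking. process_image splits a data URL of the form data:image/png;base64,<payload> at the first comma, then extracts the media type from the data:...;base64 prefix. And the size guard's * 3 / 4 factor is the base64 overhead ratio: every 4 encoded characters decode to 3 raw bytes. A self-contained sketch of both, using made-up image bytes:

import base64

# Sketch: mirrors the parsing in process_image above on a made-up data URL.
raw = b"\x89PNG fake bytes for illustration"      # hypothetical image data
b64 = base64.b64encode(raw).decode()
url = f"data:image/png;base64,{b64}"

mime_type, base64_data = url.split(",", 1)         # "data:image/png;base64"
media_type = mime_type.split(":")[1].split(";")[0] # "image/png"
assert media_type == "image/png"

# Size estimate used by the 100 MB guard: 4 base64 chars encode 3 raw bytes,
# so decoded size is roughly len(encoded) * 3 / 4 (ignoring '=' padding).
estimated = len(base64_data) * 3 / 4
assert abs(estimated - len(raw)) <= 2              # slack for padding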
@@ -117,31 +146,6 @@ class Pipeline:
             elif chunk.type == "content_block_delta":
                 yield chunk.delta.text

-    def get_completion(self, model_id: str, messages: List[dict], body: dict) -> str:
-        system_message, messages = pop_system_message(messages)
-
-        max_tokens = (
-            body.get("max_tokens") if body.get("max_tokens") is not None else 4096
-        )
-        temperature = (
-            body.get("temperature") if body.get("temperature") is not None else 0.8
-        )
-        top_k = body.get("top_k") if body.get("top_k") is not None else 40
-        top_p = body.get("top_p") if body.get("top_p") is not None else 0.9
-        stop_sequences = body.get("stop") if body.get("stop") is not None else []
-
-        response = self.client.messages.create(
-            **{
-                "model": model_id,
-                **(
-                    {"system": system_message} if system_message else {}
-                ),  # Add system message if it exists (optional
-                "messages": messages,
-                "max_tokens": max_tokens,
-                "temperature": temperature,
-                "top_k": top_k,
-                "top_p": top_p,
-                "stop_sequences": stop_sequences,
-            }
-        )
+    def get_completion(self, model_id: str, payload: dict) -> str:
+        response = self.client.messages.create(**payload)
         return response.content[0].text
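After this refactor, all parameter handling lives in pipe(), and both stream_response() and get_completion() reduce to a single client.messages.create(**payload) call. A sketch of driving the refactored pipe() directly follows; it assumes ANTHROPIC_API_KEY is set in the environment and that the Pipeline class above is in scope, and the user/chat ids are placeholders.

# Sketch: exercising the refactored pipe() directly. Assumes the module above
# is importable and ANTHROPIC_API_KEY is set; ids below are placeholders.
pipeline = Pipeline()

body = {
    "stream": True,
    "temperature": 0.2,
    "user": "u-123",     # stripped by the body.pop() loop before the API call
    "chat_id": "c-456",  # likewise removed
}
messages = [
    {"role": "system", "content": "You are terse."},  # lifted out by pop_system_message()
    {"role": "user", "content": "Say hello."},
]

# With "stream": True, pipe() returns the generator from stream_response(),
# which yields text from content_block_delta events.
for delta in pipeline.pipe("Say hello.", "claude-3-5-sonnet-20240620", messages, body):
    print(delta, end="")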