mirror of https://github.com/open-webui/pipelines (synced 2025-06-26 18:15:58 +00:00)
Add logging
This commit is contained in:
parent 50132989d0
commit c44217b5b8
@@ -2,11 +2,15 @@
 from typing import List, Union, Iterator
 import os
+import logging
 
 from pydantic import BaseModel
 
 import google.generativeai as genai
 
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
 
 
 class Pipeline:
     """Google GenAI pipeline"""
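For context on what this hunk wires up: logging.basicConfig configures the root logger once per process, and getLogger(__name__) returns a logger named after the module, so records can later be filtered per module. A minimal standalone sketch of that behavior (the model id in the log call is a placeholder, not from this repo):

    import logging

    logging.basicConfig(level=logging.INFO)   # root logger emits INFO and above
    logger = logging.getLogger(__name__)      # module-scoped logger

    logger.info("Pipe function called for model: %s", "models/example")  # hypothetical id
    # prints: INFO:__main__:Pipe function called for model: models/example

Note that basicConfig is a no-op if the root logger already has handlers, so a hosting process that configures logging first will take precedence over the level chosen here.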
@@ -72,7 +76,8 @@ class Pipeline:
     def pipe(
         self, user_message: str, model_id: str, messages: List[dict], body: dict
    ) -> Union[str, Iterator]:
-        print(f"pipe:{__name__}")
+        logger.info(f"Pipe function called for model: {model_id}")
+        logger.info(f"Stream mode: {body['stream']}")
 
         system_prompt = None
         google_messages = []
@@ -87,12 +92,10 @@ class Pipeline:
             try:
                 content = message.get("content", "")
                 if isinstance(content, list):
-                    # Handle potential multi-modal content
                     parts = []
                     for item in content:
                         if item["type"] == "text":
                             parts.append({"text": item["text"]})
-                    # Add handling for other content types if necessary
                 else:
                     parts = [{"text": content}]
 
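The loop in this hunk flattens OpenAI-style multi-modal content lists into the parts structure handed to genai; only text items are kept, and the removed comment hinted at other content types that remain unhandled. A hedged sketch of the transformation, with the input shape assumed from the keys the code reads:

    content = [
        {"type": "text", "text": "Describe this image."},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},  # silently dropped
    ]

    parts = []
    for item in content:
        if item["type"] == "text":        # anything other than "text" falls through
            parts.append({"text": item["text"]})

    print(parts)   # [{'text': 'Describe this image.'}]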
@@ -101,9 +104,8 @@ class Pipeline:
                     "parts": parts
                 })
             except Exception as e:
-                print(f"Error processing message: {e}")
-                print(f"Problematic message: {message}")
-                # You might want to skip this message or handle the error differently
+                logger.error(f"Error processing message: {e}")
+                logger.error(f"Problematic message: {message}")
 
         try:
             model = genai.GenerativeModel(
@@ -112,7 +114,7 @@ class Pipeline:
                     temperature=body.get("temperature", 0.7),
                     top_p=body.get("top_p", 1.0),
                     top_k=body.get("top_k", 1),
-                    max_output_tokens=body.get("max_tokens", 8192),
+                    max_output_tokens=body.get("max_tokens", 1024),
                 )
             )
 
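Each sampling knob above falls back to a default when the request body omits it; this commit also lowers the max_output_tokens fallback from 8192 to 1024. The pattern is plain dict.get, sketched here with an illustrative body:

    body = {"temperature": 0.2}                          # most knobs omitted

    generation_config = {
        "temperature": body.get("temperature", 0.7),     # 0.2, taken from the body
        "top_p": body.get("top_p", 1.0),                 # 1.0, fallback
        "top_k": body.get("top_k", 1),                   # 1, fallback
        "max_output_tokens": body.get("max_tokens", 1024),  # 1024, the new default
    }

Requests that previously relied on the implicit 8192-token ceiling will now be cut off at 1024 unless they send max_tokens explicitly.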
@@ -122,12 +124,16 @@ class Pipeline:
             )
 
             if body["stream"]:
+                logger.info("Streaming response")
                 for chunk in response:
                     yield chunk.text
                 return ""
-
-            return response.text
+            else:
+                logger.info("Non-streaming response")
+                result = response.text
+                logger.info(f"Generated content: {result}")
+                return result
 
         except Exception as e:
-            print(f"Error generating content: {e}")
+            logger.error(f"Error generating content: {e}")
             return f"An error occurred: {str(e)}"
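One caveat about the new else branch: because pipe contains a yield, Python treats the whole function as a generator, so even the non-streaming path produces a generator object and the value of "return result" travels as StopIteration.value; how the pipelines server unwraps that is outside this diff. A hedged usage sketch for the streaming path (the constructor, API-key setup, and model id are assumptions, not shown in this commit):

    pipeline = Pipeline()                      # assumes credentials are configured elsewhere in the class
    body = {"stream": True, "temperature": 0.7}
    messages = [{"role": "user", "content": "Hello"}]

    for chunk in pipeline.pipe("Hello", "gemini-pro", messages, body):
        print(chunk, end="", flush=True)       # chunks printed as they arrive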