Add permissive safety valve to Gemini Manifold

Justin Hayes authored 2024-08-01 14:21:29 -04:00, committed by GitHub
parent c76d24b032
commit 7b784bcdfe

@@ -2,7 +2,7 @@
 title: Google GenAI Manifold Pipeline
 author: Marc Lopez (refactor by justinh-rahb)
 date: 2024-06-06
-version: 1.1
+version: 1.2
 license: MIT
 description: A pipeline for generating text using Google's GenAI models in Open-WebUI.
 requirements: google-generativeai
@@ -12,7 +12,7 @@ environment_variables: GOOGLE_API_KEY
 from typing import List, Union, Iterator
 import os

-from pydantic import BaseModel
+from pydantic import BaseModel, Field

 import google.generativeai as genai
 from google.generativeai.types import GenerationConfig
@@ -25,13 +25,17 @@ class Pipeline:
         """Options to change from the WebUI"""

         GOOGLE_API_KEY: str = ""
+        USE_PERMISSIVE_SAFETY: bool = Field(default=False)

     def __init__(self):
         self.type = "manifold"
         self.id = "google_genai"
         self.name = "Google: "

-        self.valves = self.Valves(**{"GOOGLE_API_KEY": os.getenv("GOOGLE_API_KEY", "")})
+        self.valves = self.Valves(**{
+            "GOOGLE_API_KEY": os.getenv("GOOGLE_API_KEY", ""),
+            "USE_PERMISSIVE_SAFETY": False
+        })
         self.pipelines = []

         genai.configure(api_key=self.valves.GOOGLE_API_KEY)
@@ -135,6 +139,14 @@ class Pipeline:
             stop_sequences=body.get("stop", []),
         )

-        safety_settings = body.get("safety_settings")
+        if self.valves.USE_PERMISSIVE_SAFETY:
+            safety_settings = {
+                genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
+                genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
+                genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE,
+                genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
+            }
+        else:
+            safety_settings = body.get("safety_settings")

         response = model.generate_content(
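
For reference, a minimal standalone sketch of how permissive safety settings of this kind are passed to google-generativeai's generate_content call. The model id and prompt below are illustrative placeholders, not part of this commit; only the BLOCK_NONE mapping mirrors the USE_PERMISSIVE_SAFETY branch above.

import os

import google.generativeai as genai
from google.generativeai.types import GenerationConfig

# Configure the client from the same environment variable the pipeline reads.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY", ""))

# Illustrative model id; the pipeline discovers available models dynamically.
model = genai.GenerativeModel("gemini-1.5-flash")

# Disable blocking for every harm category, as the permissive branch does.
permissive_safety = {
    genai.types.HarmCategory.HARM_CATEGORY_HARASSMENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
    genai.types.HarmCategory.HARM_CATEGORY_HATE_SPEECH: genai.types.HarmBlockThreshold.BLOCK_NONE,
    genai.types.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: genai.types.HarmBlockThreshold.BLOCK_NONE,
    genai.types.HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: genai.types.HarmBlockThreshold.BLOCK_NONE,
}

response = model.generate_content(
    "Hello there",  # placeholder prompt
    generation_config=GenerationConfig(temperature=0.7),
    safety_settings=permissive_safety,
)
print(response.text)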