mirror of https://github.com/open-webui/pipelines
synced 2025-05-12 08:30:43 +00:00

Handle more params

parent 830ae49f09
commit 940d91c216
@@ -23,6 +23,7 @@ class Pipeline:
         self.process = None
         self.model = os.getenv('MLX_MODEL', 'mistralai/Mistral-7B-Instruct-v0.2')  # Default model if not set in environment variable
         self.port = self.find_free_port()
+        self.stop_sequences = os.getenv('MLX_STOP', None)  # Stop sequences from environment variable

     @staticmethod
     def find_free_port():
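
The hunk above references find_free_port() without showing its body. For context, a minimal sketch of the usual bind-to-port-zero trick — an assumption, since the actual implementation lies outside this diff:

import socket

class Pipeline:
    @staticmethod
    def find_free_port():
        # Bind to port 0 so the OS assigns an unused TCP port,
        # then close the socket and return the assigned number.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("", 0))
            return s.getsockname()[1]

This pattern has an inherent race — another process could claim the port between the socket closing and the MLX server binding it — which is usually acceptable for a local helper. Note also that MLX_STOP is stored as the raw environment string (or None); if the server expects a list of stop sequences, splitting would have to happen elsewhere, which this diff does not show.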
@@ -70,7 +71,14 @@ class Pipeline:
         print(f"get_response:{__name__}")

         MLX_BASE_URL = f"http://localhost:{self.port}"
-        MODEL = "llama3"
+        MODEL = self.model

+        # Extract additional parameters from the body
+        temperature = body.get("temperature", 1.0)
+        max_tokens = body.get("max_tokens", 100)
+        top_p = body.get("top_p", 1.0)
+        repetition_penalty = body.get("repetition_penalty", 1.0)
+        stop = self.stop_sequences
+
         if "user" in body:
             print("######################################")
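
To illustrate the new extraction, a small sketch with a hypothetical request body — only temperature is supplied, so the other .get() lookups fall back to the defaults this commit chooses:

# Hypothetical body, for illustration only.
body = {
    "messages": [{"role": "user", "content": "Hello"}],
    "temperature": 0.2,
}

temperature = body.get("temperature", 1.0)                # 0.2, from the body
max_tokens = body.get("max_tokens", 100)                  # 100, default applies
top_p = body.get("top_p", 1.0)                            # 1.0, default applies
repetition_penalty = body.get("repetition_penalty", 1.0)  # 1.0, default applies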
@@ -78,18 +86,26 @@ class Pipeline:
             print(f"# Message: {user_message}")
             print("######################################")

+        payload = {
+            "model": MODEL,
+            "messages": messages,
+            "temperature": temperature,
+            "max_tokens": max_tokens,
+            "top_p": top_p,
+            "repetition_penalty": repetition_penalty,
+            "stop": stop,
+            "stream": True  # Always stream responses
+        }
+
         try:
             r = requests.post(
                 url=f"{MLX_BASE_URL}/v1/chat/completions",
-                json={**body, "model": MODEL},
+                json=payload,
                 stream=True,
             )

             r.raise_for_status()

-            if body["stream"]:
-                return r.iter_lines()
-            else:
-                return r.json()
+            return r.iter_lines()
         except Exception as e:
             return f"Error: {e}"
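
Building an explicit payload replaces the old json={**body, "model": MODEL} pass-through, so only known parameters reach the server and each has a defined default; and because "stream": True is always sent, the body["stream"] branch is gone and callers always receive a line iterator. A hedged consumer-side sketch — the get_response name and signature are only hinted at by this diff, and the OpenAI-style SSE chunk shape is an assumption about the MLX server:

import json

# pipeline, user_message, messages and body are placeholders here.
for line in pipeline.get_response(user_message, messages, body):
    if not line:
        continue  # skip keep-alive blank lines between SSE events
    text = line.decode("utf-8")
    if text.startswith("data: "):
        data = text[len("data: "):]
        if data == "[DONE]":
            break  # end-of-stream sentinel used by OpenAI-compatible servers
        chunk = json.loads(data)
        # Assumed chat.completion.chunk shape: choices[0].delta.content
        print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)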