Mirror of https://github.com/princeton-nlp/tree-of-thought-llm (synced 2025-04-26 00:48:56 +00:00)
adding support for multiple n for gpt

commit 5796bd562c (parent a8c76b3d5c)
@@ -35,7 +35,7 @@ def get_votes(task, x, ys, n_evaluate_sample):

 def get_proposals(task, x, y):
     propose_prompt = task.propose_prompt_wrap(x, y)
-    proposals = inference_model(propose_prompt, n=1, stop=None)[0].split('\n')
+    proposals = inference_model(propose_prompt, n=5, stop=None)[0].split('\n')
     return [y + _ + '\n' for _ in proposals]

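To make the effect of the n=1 to n=5 change concrete, here is a minimal runnable sketch of the data flow; fake_inference_model is a hypothetical stand-in for the repository's inference_model wrapper and is assumed to return one completion string per sampled choice.

def fake_inference_model(prompt, n=1, stop=None):
    # Hypothetical stand-in for the repo's inference_model: pretend each of the
    # n samples is a newline-separated list of candidate next steps.
    return [f"candidate step A (sample {i})\ncandidate step B (sample {i})" for i in range(n)]

def get_proposals_sketch(propose_prompt, y):
    # Request 5 samples, but (as in the committed line) split only the first
    # returned completion into candidate lines.
    proposals = fake_inference_model(propose_prompt, n=5, stop=None)[0].split('\n')
    # Each candidate line extends the current partial solution y.
    return [y + p + '\n' for p in proposals]

print(get_proposals_sketch('propose next steps', 'partial solution so far\n'))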
@@ -69,13 +69,13 @@ def chatgpt(model, messages, temperature=0.7, max_tokens=1000, n=5, stop=None) -
     global completion_tokens, prompt_tokens
     outputs = []
     client = OpenAI()

     while n > 0:
         cnt = min(n, 20)
         n -= cnt

         res = client.chat.completions.create(model=model, messages=messages, temperature=temperature, n=cnt, stop=stop)
-        res_answer = res.choices[0].message.content #answers get returned in a single message.content string even when n > 1; need to double check on why later
-        outputs.extend([res_answer])
+        outputs.extend([choice.message.content for choice in res.choices])

         # log completion tokens
         completion_tokens += res.usage.completion_tokens
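Putting the visible pieces together, here is a self-contained sketch of how the batched helper could read after this commit, assuming the openai>=1.0 client used above; the prompt_tokens update, the final return value, and the cut-off return annotation are not visible in the hunk and are labeled as assumptions in the comments.

from openai import OpenAI

completion_tokens = 0
prompt_tokens = 0

def chatgpt(model, messages, temperature=0.7, max_tokens=1000, n=5, stop=None):
    global completion_tokens, prompt_tokens
    outputs = []
    client = OpenAI()

    while n > 0:
        # Request at most 20 sampled choices per API call.
        cnt = min(n, 20)
        n -= cnt

        # max_tokens is accepted by the signature but not forwarded in the visible hunk.
        res = client.chat.completions.create(model=model, messages=messages,
                                             temperature=temperature, n=cnt, stop=stop)
        # Each choice is a separate completion; keep all of them
        # (the old code only kept res.choices[0]).
        outputs.extend([choice.message.content for choice in res.choices])

        # log completion tokens
        completion_tokens += res.usage.completion_tokens
        # Assumption: prompt tokens are tallied the same way in lines not shown.
        prompt_tokens += res.usage.prompt_tokens

    return outputs  # assumed; the hunk ends before the function's return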