Merge pull request #70 from ltn18/gpt4o

[feature] Tree-of-Thought with GPT-4o
Shunyu Yao 2025-01-16 12:02:00 -08:00 committed by GitHub
commit 8050e67d0e
2 changed files with 6 additions and 4 deletions

run.py

@@ -42,7 +42,7 @@ def run(args):
 def parse_args():
     args = argparse.ArgumentParser()
-    args.add_argument('--backend', type=str, choices=['gpt-4', 'gpt-3.5-turbo'], default='gpt-4')
+    args.add_argument('--backend', type=str, choices=['gpt-4', 'gpt-3.5-turbo', 'gpt-4o'], default='gpt-4')
     args.add_argument('--temperature', type=float, default=0.7)
     args.add_argument('--task', type=str, required=True, choices=['game24', 'text', 'crosswords'])
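For context (not part of the diff): with this change, gpt-4o becomes a third accepted value for --backend. A minimal invocation sketch, assuming the remaining run.py flags keep their defaults; any arguments not visible in the hunk above are omitted:

    python run.py --task game24 --backend gpt-4o --temperature 0.7

The second changed file (not named in this extract) holds the chatgpt and gpt_usage helpers modified below.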

@@ -30,10 +30,10 @@ def chatgpt(messages, model="gpt-4", temperature=0.7, max_tokens=1000, n=1, stop
         cnt = min(n, 20)
         n -= cnt
         res = completions_with_backoff(model=model, messages=messages, temperature=temperature, max_tokens=max_tokens, n=cnt, stop=stop)
-        outputs.extend([choice["message"]["content"] for choice in res["choices"]])
+        outputs.extend([choice.message.content for choice in res.choices])
         # log completion tokens
-        completion_tokens += res["usage"]["completion_tokens"]
+        completion_tokens += res.usage.completion_tokens
-        prompt_tokens += res["usage"]["prompt_tokens"]
+        prompt_tokens += res.usage.prompt_tokens
     return outputs
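For context (not part of the diff): replacing the res["choices"] / res["usage"] dict lookups with attribute access matches the response objects returned by the openai>=1.0 Python client. A minimal sketch of the call pattern the updated chatgpt helper relies on; the client setup shown here is an assumption, since completions_with_backoff is defined elsewhere in the file, and the prompt is only illustrative:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # openai>=1.0 returns typed objects, so fields are attributes rather than dict keys
    res = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Use 4 8 8 13 to reach 24."}],
        temperature=0.7,
        max_tokens=1000,
        n=1,
    )
    outputs = [choice.message.content for choice in res.choices]
    completion_tokens = res.usage.completion_tokens
    prompt_tokens = res.usage.prompt_tokens

The helper itself batches large n into chunks of at most 20 per request, as the cnt = min(n, 20) loop above shows.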
@@ -42,4 +42,6 @@ def gpt_usage(backend="gpt-4"):
         cost = completion_tokens / 1000 * 0.06 + prompt_tokens / 1000 * 0.03
     elif backend == "gpt-3.5-turbo":
         cost = completion_tokens / 1000 * 0.002 + prompt_tokens / 1000 * 0.0015
+    elif backend == "gpt-4o":
+        cost = completion_tokens / 1000 * 0.00250 + prompt_tokens / 1000 * 0.01
     return {"completion_tokens": completion_tokens, "prompt_tokens": prompt_tokens, "cost": cost}