diff --git a/models.py b/models.py
index 894ed89..05710d6 100644
--- a/models.py
+++ b/models.py
@@ -4,6 +4,17 @@ import backoff
 
 completion_tokens = prompt_tokens = 0
 
+api_key = os.getenv("OPENAI_API_KEY", "")
+if api_key != "":
+    openai.api_key = api_key
+else:
+    print("Warning: OPENAI_API_KEY is not set")
+
+api_base = os.getenv("OPENAI_API_BASE", "")
+if api_base != "":
+    print("Warning: OPENAI_API_BASE is set to {}".format(api_base))
+    openai.api_base = api_base
+
 @backoff.on_exception(backoff.expo, openai.error.OpenAIError)
 def completions_with_backoff(**kwargs):
     return openai.ChatCompletion.create(**kwargs)
diff --git a/readme.md b/readme.md
index 7dd8250..b309599 100644
--- a/readme.md
+++ b/readme.md
@@ -7,8 +7,16 @@ Also check [its tweet thread](https://twitter.com/ShunyuYao12/status/16593575474
 
 ## Setup
 You need to first have an OpenAI API key and store it in the environment variable ``OPENAI_API_KEY`` (see [here](https://help.openai.com/en/articles/5112595-best-practices-for-api-key-safety)).
+If you use a custom base URL, set it via the environment variable ``OPENAI_API_BASE`` (e.g. "https://api.openai.com/v1").
+
+Note that the GPT-4 model is fairly slow, so please be patient.
+
 Package requirement: ``openai``, ``backoff``, ``sympy``, ``numpy``.
 
+```
+pip install openai backoff sympy numpy
+```
+
 ## Experiments
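
For reference, a minimal usage sketch (not part of the diff) of how the new environment-variable handling is exercised. It assumes the pre-1.0 ``openai`` package that the diff targets; the key value, model name, and prompt are illustrative only.

```python
import os

# Assumption for illustration: set both variables before importing models.py,
# since the diff reads them at module import time.
os.environ["OPENAI_API_KEY"] = "sk-your-key"                  # required
os.environ["OPENAI_API_BASE"] = "https://api.openai.com/v1"   # optional override

from models import completions_with_backoff

# completions_with_backoff (from the diff) retries on openai.error.OpenAIError
# with exponential backoff and delegates to openai.ChatCompletion.create.
response = completions_with_backoff(
    model="gpt-4",
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response["choices"][0]["message"]["content"])
```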