remove max_tokens from the official version of gpt4-turbo
This commit is contained in:
parent 9b2cb1e1c3
commit dd4648ed9a
@@ -129,7 +129,7 @@ export class ChatGPTApi implements LLMApi {
       };

       // add max_tokens to vision model
-      if (visionModel) {
+      if (visionModel && modelConfig.model.includes("preview")) {
         requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
       }

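The gist of the change: max_tokens is now only attached when the request targets a vision model whose name contains "preview" (e.g. gpt-4-vision-preview), so requests for the official gpt-4-turbo go out without it. Below is a minimal TypeScript sketch of that gating; buildRequestPayload and the ModelConfig shape are hypothetical stand-ins for illustration, not the project's actual code.

// Hypothetical helper mirroring the changed condition from the diff.
interface ModelConfig {
  model: string;
  max_tokens: number;
}

function buildRequestPayload(modelConfig: ModelConfig, visionModel: boolean) {
  const requestPayload: Record<string, unknown> = { model: modelConfig.model };

  // After this commit, only vision "preview" models get an explicit max_tokens;
  // the official gpt-4-turbo request is sent without one.
  if (visionModel && modelConfig.model.includes("preview")) {
    requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
  }

  return requestPayload;
}

// gpt-4-vision-preview keeps max_tokens (floored at 4000):
console.log(buildRequestPayload({ model: "gpt-4-vision-preview", max_tokens: 1000 }, true));
// gpt-4-turbo no longer carries max_tokens:
console.log(buildRequestPayload({ model: "gpt-4-turbo", max_tokens: 1000 }, true));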