fix add_generation_prompt in latest version

This commit is contained in:
Dejian Yang 2024-01-09 21:55:21 +08:00
parent d3bb741e25
commit c160d96860
2 changed files with 2 additions and 2 deletions

View File

@@ -39,7 +39,7 @@ def generate_one(example, lang, tokenizer, model):
eos_token_id=stop_id
)
output = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)
output = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True, add_generation_prompt=True)
example['output'] = output
return extract_generation_code(example, lang_code=lang)

View File

@@ -66,7 +66,7 @@ def generate_one(example, tokenizer, model):
prompt = example['prompt']
inputs = tokenizer.apply_chat_template(
[{'role': 'user', 'content': prompt }],
return_tensors="pt"
return_tensors="pt", add_generation_prompt=True
).to(model.device)
stop_id = tokenizer.convert_tokens_to_ids("<|EOT|>")