# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

"""
From https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py
"""

import dataclasses
from enum import IntEnum, auto
from typing import Dict, List


class SeparatorStyle(IntEnum):
    """Separator styles."""

    ADD_COLON_SINGLE = auto()
    ADD_COLON_TWO = auto()
    ADD_COLON_SPACE_SINGLE = auto()
    NO_COLON_SINGLE = auto()
    NO_COLON_TWO = auto()
    ADD_NEW_LINE_SINGLE = auto()
    LLAMA2 = auto()
    CHATGLM = auto()
    CHATML = auto()
    CHATINTERN = auto()
    DOLLY = auto()
    RWKV = auto()
    PHOENIX = auto()
    ROBIN = auto()
    DeepSeek = auto()
    PLAIN = auto()
    ALIGNMENT = auto()


@dataclasses.dataclass
class Conversation:
    """A class that manages prompt templates and keeps all conversation history."""

    # The name of this template
    name: str
    # The template of the system prompt
    system_template: str = "{system_message}"
    # The system message
    system_message: str = ""
    # The names of two roles
    roles: List[str] = (("USER", "ASSISTANT"),)
    # All messages. Each item is (role, message).
    messages: List[List[str]] = ()
    # The number of few shot examples
    offset: int = 0
    # The separator style and configurations
    sep_style: SeparatorStyle = SeparatorStyle.ADD_COLON_SINGLE
    sep: str = "\n"
    sep2: str = None
    # Stop criteria (the default one is EOS token)
    stop_str: str = None
    # Stops generation if meeting any token in this list
    stop_token_ids: List[int] = None

    def get_prompt(self) -> str:
        """Get the prompt for generation."""
        system_prompt = self.system_template.format(system_message=self.system_message)

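        # DeepSeek style: turns alternate between sep (after user messages) and
        # sep2 (after assistant messages); a trailing None message leaves a bare
        # "Role:" so the model continues from there.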
        if self.sep_style == SeparatorStyle.DeepSeek:
            seps = [self.sep, self.sep2]
            if system_prompt == "" or system_prompt is None:
                ret = ""
            else:
                ret = system_prompt + seps[0]
            for i, (role, message) in enumerate(self.messages):
                if message:
                    ret += role + ": " + message + seps[i % 2]
                else:
                    ret += role + ":"
            return ret
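        # LLAMA2 style: the system template already opens the first "[INST]"
        # block, so the first user message is appended without its role tag;
        # tuple (multimodal) messages keep only the text part.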
        elif self.sep_style == SeparatorStyle.LLAMA2:
            seps = [self.sep, self.sep2]
            if self.system_message:
                ret = system_prompt
            else:
                ret = "[INST] "
            for i, (role, message) in enumerate(self.messages):
                tag = self.roles[i % 2]
                if message:
                    if type(message) is tuple:  # multimodal message
                        message, _ = message
                    if i == 0:
                        ret += message + " "
                    else:
                        ret += tag + " " + message + seps[i % 2]
                else:
                    ret += tag
            return ret
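        # PLAIN style: messages are concatenated without role tags; tuple
        # messages keep only their first (text) element.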
        elif self.sep_style == SeparatorStyle.PLAIN:
            seps = [self.sep, self.sep2]
            ret = ""
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    if i % 2 == 0:
                        ret += message + seps[i % 2]
                    else:
                        ret += message + seps[i % 2]
                else:
                    ret += ""
            return ret
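        # ALIGNMENT style: each user turn is replaced by an "<image>\n"
        # placeholder and only the assistant text is kept verbatim.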
        elif self.sep_style == SeparatorStyle.ALIGNMENT:
            seps = [self.sep, self.sep2]
            ret = ""
            for i, (role, message) in enumerate(self.messages):
                if message:
                    if type(message) is tuple:
                        message, _, _ = message
                    if i % 2 == 0:
                        ret += "<image>\n" + seps[i % 2]
                    else:
                        ret += message + seps[i % 2]
                else:
                    ret += ""
            return ret
        else:
            raise ValueError(f"Invalid style: {self.sep_style}")

    def get_prompt_for_current_round(self, content=None):
        """Get the formatted question prompt for the current round during SFT training."""
        if self.sep_style == SeparatorStyle.PLAIN:
            formatted_question = "<image>\n"
        elif self.sep_style == SeparatorStyle.DeepSeek:
            formatted_question = (
                f"{self.roles[0]}: " + content.strip() + self.sep + f"{self.roles[1]}:"
            )
        else:
            raise ValueError(f"Unsupported sep_style: {self.sep_style}")
        return formatted_question

    def set_system_message(self, system_message: str):
        """Set the system message."""
        self.system_message = system_message

    def append_message(self, role: str, message: str):
        """Append a new message."""
        self.messages.append([role, message])

    def reset_message(self):
        """Clear all messages."""
        self.messages = []

    def update_last_message(self, message: str):
        """Update the last output.

        The last message is typically set to be None when constructing the prompt,
        so we need to update it in-place after getting the response from a model.
        """
        self.messages[-1][1] = message

    def to_gradio_chatbot(self):
        """Convert the conversation to gradio chatbot format."""
        ret = []
        for i, (role, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append([msg, None])
            else:
                ret[-1][-1] = msg
        return ret

    def to_openai_api_messages(self):
        """Convert the conversation to OpenAI chat completion format."""
        system_prompt = self.system_template.format(system_message=self.system_message)
        ret = [{"role": "system", "content": system_prompt}]

        for i, (_, msg) in enumerate(self.messages[self.offset :]):
            if i % 2 == 0:
                ret.append({"role": "user", "content": msg})
            else:
                if msg is not None:
                    ret.append({"role": "assistant", "content": msg})
        return ret

    def copy(self):
        return Conversation(
            name=self.name,
            system_template=self.system_template,
            system_message=self.system_message,
            roles=self.roles,
            messages=[[x, y] for x, y in self.messages],
            offset=self.offset,
            sep_style=self.sep_style,
            sep=self.sep,
            sep2=self.sep2,
            stop_str=self.stop_str,
            stop_token_ids=self.stop_token_ids,
        )

    def dict(self):
        return {
            "template_name": self.name,
            "system_message": self.system_message,
            "roles": self.roles,
            "messages": self.messages,
            "offset": self.offset,
        }


# A global registry for all conversation templates
conv_templates: Dict[str, Conversation] = {}


def register_conv_template(template: Conversation, override: bool = False):
    """Register a new conversation template."""
    if not override:
        assert (
            template.name not in conv_templates
        ), f"{template.name} has been registered."

    conv_templates[template.name] = template


def get_conv_template(name: str) -> Conversation:
    """Get a conversation template."""
    return conv_templates[name].copy()


# llava_llama2 template
register_conv_template(
    Conversation(
        name="llava_llama2",
        system_message="You are a helpful language and vision assistant. "
        "You are able to understand the visual content that the user provides, "
        "and assist the user with a variety of tasks using natural language.",
        system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
        roles=("[INST]", "[/INST]"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.LLAMA2,
        sep=" ",
        sep2=" </s><s>",
        stop_token_ids=[2],
    )
)

# llama2 template
# reference: https://github.com/facebookresearch/llama/blob/cfc3fc8c1968d390eb830e65c63865e980873a06/llama/generation.py#L212
register_conv_template(
    Conversation(
        name="llama-2",
        system_template="[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n",
        roles=("[INST]", "[/INST]"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.LLAMA2,
        sep=" ",
        sep2=" </s><s>",
        stop_token_ids=[2],
    )
)


# deepseek template
register_conv_template(
    Conversation(
        name="deepseek",
        system_template="{system_message}",
        # system_message="You are a helpful assistant. Please answer truthfully and write out your "
        # "thinking step by step to be sure you get the right answer.",
        system_message="",
        roles=("User", "Assistant"),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.DeepSeek,
        sep="\n\n",
        sep2="<|end▁of▁sentence|>",
        stop_token_ids=[100001],
        stop_str=["User:", "<|end▁of▁sentence|>"],
    )
)

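# plain template (no roles and no separators; rendered with SeparatorStyle.PLAIN)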
register_conv_template(
    Conversation(
        name="plain",
        system_template="",
        system_message="",
        roles=("", ""),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.PLAIN,
        sep="",
        sep2="",
        stop_token_ids=[2],
        stop_str=["</s>"],
    )
)


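# alignment template (SeparatorStyle.ALIGNMENT: user turns become "<image>\n" placeholders)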
register_conv_template(
    Conversation(
        name="alignment",
        system_template="",
        system_message="",
        roles=("", ""),
        messages=(),
        offset=0,
        sep_style=SeparatorStyle.ALIGNMENT,
        sep="",
        sep2="",
        stop_token_ids=[2],
        stop_str=["</s>"],
    )
)


if __name__ == "__main__":
    # print("Llama-2 template:")
    # conv = get_conv_template("llama-2")
    # conv.set_system_message("You are a helpful, respectful and honest assistant.")
    # conv.append_message(conv.roles[0], "Hello!")
    # conv.append_message(conv.roles[1], "Hi!")
    # conv.append_message(conv.roles[0], "How are you?")
    # conv.append_message(conv.roles[1], None)
    # print(conv.get_prompt())

    # print("\n")

    print("deepseek template:")
    conv = get_conv_template("deepseek")
    conv.append_message(conv.roles[0], "Hello!")
    conv.append_message(conv.roles[1], "Hi! This is Tony.")
    conv.append_message(conv.roles[0], "Who are you?")
    conv.append_message(conv.roles[1], "I am a helpful assistant.")
    conv.append_message(conv.roles[0], "How are you?")
    conv.append_message(conv.roles[1], None)
    print(conv.get_prompt())
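    # The rendered deepseek prompt should look roughly like:
    # User: Hello!
    #
    # Assistant: Hi! This is Tony.<|end▁of▁sentence|>User: Who are you?
    #
    # Assistant: I am a helpful assistant.<|end▁of▁sentence|>User: How are you?
    #
    # Assistant: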