Update chat template for qwen and yi-1.5. (#530)
for-just-we committed Jul 9, 2024
1 parent 740c46a commit d557e9f
Showing 1 changed file with 41 additions and 6 deletions.
python/sglang/lang/chat_template.py (47 changes: 41 additions & 6 deletions)
@@ -84,7 +84,7 @@ def get_chat_template_by_model_path(model_path):
             "system": ("SYSTEM:", "\n"),
             "user": ("USER:", "\n"),
             "assistant": ("ASSISTANT:", "\n"),
-        },
+        }
     )
 )

@@ -116,6 +116,23 @@ def get_chat_template_by_model_path(model_path):
     )
 )
 
+# There is a default system prompt for qwen.
+# Reference: https://modelscope.cn/models/qwen/Qwen2-72B-Instruct/file/view/master?fileName=tokenizer_config.json&status=1
+# The chat template is: "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
+register_chat_template(
+    ChatTemplate(
+        name="qwen",
+        default_system_prompt="You are a helpful assistant.",
+        role_prefix_and_suffix={
+            "system": ("<|im_start|>system\n", "<|im_end|>\n"),
+            "user": ("<|im_start|>user\n", "<|im_end|>\n"),
+            "assistant": ("<|im_start|>assistant\n", "<|im_end|>\n"),
+        },
+        style=ChatTemplateStyle.PLAIN,
+        stop_str=("<|im_end|>",),
+    )
+)
+
 
 register_chat_template(
     ChatTemplate(
@@ -132,6 +149,7 @@ def get_chat_template_by_model_path(model_path):
     )
 )
 
+# Reference: https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md#prompt-template
 register_chat_template(
     ChatTemplate(
         name="vicuna_v1.1",
@@ -148,6 +166,20 @@ def get_chat_template_by_model_path(model_path):
     )
 )
 
+# Reference: https://modelscope.cn/models/01ai/Yi-1.5-34B-Chat/file/view/master?fileName=tokenizer_config.json&status=1
+register_chat_template(
+    ChatTemplate(
+        name="yi-1.5",
+        default_system_prompt=None,
+        role_prefix_and_suffix={
+            "system": ("", ""),
+            "user": ("<|im_start|>user\n", "<|im_end|>\n<|im_start|>assistant\n"),
+            "assistant": ("", "<|im_end|>\n"),
+        },
+        style=ChatTemplateStyle.PLAIN,
+        stop_str=("<|im_end|>",)
+    )
+)
 
 register_chat_template(
     ChatTemplate(
@@ -187,7 +219,7 @@ def get_chat_template_by_model_path(model_path):
 # Reference: https://github.com/01-ai/Yi/tree/main/VL#major-difference-with-llava
 register_chat_template(
     ChatTemplate(
-        name="yi",
+        name="yi-vl",
         default_system_prompt=(
             "This is a chat between an inquisitive human and an AI assistant. Assume the role of the AI assistant. Read all the images carefully, and respond to the human's questions with informative, helpful, detailed and polite answers."
             "这是一个好奇的人类和一个人工智能助手之间的对话。假设你扮演这个AI助手的角色。仔细阅读所有的图像,并对人类的问题做出信息丰富、有帮助、详细的和礼貌的回答。"
@@ -289,8 +321,9 @@ def match_chat_ml(model_path: str):
     model_path = model_path.lower()
     if "tinyllama" in model_path:
         return get_chat_template("chatml")
-    if "qwen" in model_path and "chat" in model_path:
-        return get_chat_template("chatml")
+    # Now the suffix for qwen2 chat models is "instruct".
+    if "qwen" in model_path and ("chat" in model_path or "instruct" in model_path):
+        return get_chat_template("qwen")
     if (
         "llava-v1.6-34b" in model_path
         or "llava-v1.6-yi-34b" in model_path
@@ -302,8 +335,10 @@ def match_chat_ml(model_path: str):
 @register_chat_template_matching_function
 def match_chat_yi(model_path: str):
     model_path = model_path.lower()
-    if "yi" in model_path and "llava" not in model_path:
-        return get_chat_template("yi")
+    if "yi-vl" in model_path and "llava" not in model_path:
+        return get_chat_template("yi-vl")
+    elif "yi-1.5" in model_path and "chat" in model_path:
+        return get_chat_template("yi-1.5")
 
 
 @register_chat_template_matching_function
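
For anyone sanity-checking the two new templates, here is a minimal sketch of how the role prefix/suffix pairs above concatenate into a prompt. Only the prefix and suffix strings come from this commit; the render helper and the sample messages are illustrative and not part of sglang.

# Minimal sketch: how a role_prefix_and_suffix table turns messages into a prompt.
QWEN = {
    "system": ("<|im_start|>system\n", "<|im_end|>\n"),
    "user": ("<|im_start|>user\n", "<|im_end|>\n"),
    "assistant": ("<|im_start|>assistant\n", "<|im_end|>\n"),
}

def render(messages, roles, default_system=None):
    # Inject the default system prompt when the caller supplies none,
    # mirroring the qwen chat template quoted in the comment above.
    if default_system and messages[0][0] != "system":
        messages = [("system", default_system)] + messages
    pieces = []
    for role, content in messages:
        prefix, suffix = roles[role]
        pieces.append(prefix + content + suffix)
    pieces.append(roles["assistant"][0])  # open the assistant turn for generation
    return "".join(pieces)

print(render([("user", "What is 2 + 2?")], QWEN, default_system="You are a helpful assistant."))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# What is 2 + 2?<|im_end|>
# <|im_start|>assistant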

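The updated matching logic can be checked the same way. This is a sketch assuming an environment with this commit installed; get_chat_template_by_model_path is the lookup shown in the hunk headers above, and the model paths are just examples.

from sglang.lang.chat_template import get_chat_template_by_model_path

# Qwen2 chat checkpoints are suffixed "Instruct", so they now route to the
# "qwen" template rather than the generic "chatml" one.
print(get_chat_template_by_model_path("Qwen/Qwen2-72B-Instruct").name)  # expected: qwen

# Yi-1.5 chat checkpoints route to the new "yi-1.5" template.
print(get_chat_template_by_model_path("01-ai/Yi-1.5-34B-Chat").name)  # expected: yi-1.5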