@@ -124,16 +124,27 @@ def run_minicpmv(question):
124124
125125# InternVL
# InternVL
def run_internvl(question):
    """Build the vLLM engine, chat-formatted prompt, and stop-token ids
    for an InternVL2 vision-language model.

    Returns a (llm, prompt, stop_token_ids) tuple consumed by the shared
    generation loop in this example script.
    """
    model_name = "OpenGVLab/InternVL2-2B"

    llm = LLM(
        model=model_name,
        trust_remote_code=True,
        max_num_seqs=5,
    )

    # Format the user turn through the model's own chat template so the
    # prompt matches what the model was trained on.
    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        trust_remote_code=True,
    )
    chat = [{'role': 'user', 'content': f"<image>\n{question}"}]
    prompt = tokenizer.apply_chat_template(
        chat,
        tokenize=False,
        add_generation_prompt=True,
    )

    # Stop tokens for InternVL.
    # Model variants may use different stop tokens; consult the model
    # card for the correct "stop words":
    # https://huggingface.co/OpenGVLab/InternVL2-2B#service
    stop_tokens = ["<|endoftext|>", "<|im_start|>", "<|im_end|>", "<|end|>"]
    stop_token_ids = [
        tokenizer.convert_tokens_to_ids(token) for token in stop_tokens
    ]
    return llm, prompt, stop_token_ids
138149
139150
0 commit comments