Commit

rm unnecessary return_for_text_completion
leloykun committed Jul 23, 2024
1 parent 2aba534 commit dba8d08
Showing 1 changed file with 3 additions and 20 deletions.
23 changes: 3 additions & 20 deletions docs/source/en/model_doc/chameleon.md
@@ -82,12 +82,7 @@ url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
 image = Image.open(requests.get(url, stream=True).raw)
 prompt = "What do you see in this image?<image>"
 
-inputs = processor(
-    prompt,
-    image,
-    return_tensors="pt",
-    return_for_text_completion=True,
-).to(model.device)
+inputs = processor(prompt, image, return_tensors="pt").to(model.device)
 
 # autoregressively complete prompt
 output = model.generate(**inputs, max_new_tokens=50)
@@ -130,7 +125,6 @@ inputs = processor(
     images=[image_stop, image_cats, image_snowman],
     padding=True,
     return_tensors="pt",
-    return_for_text_completion=True,
 ).to(device="cuda", dtype=torch.float16)
 
 # Generate
@@ -157,12 +151,7 @@ model = ChameleonForConditionalGeneration.from_pretrained(
 prompt = "Generate an image of a snowman."
 
 # Preprocess the prompt
-inputs = processor(
-    prompt,
-    padding=True,
-    return_tensors="pt",
-    return_for_text_completion=True,
-).to(model.device)
+inputs = processor(prompt, padding=True, return_tensors="pt").to(model.device)
 
 # Generate discrete image tokens
 generate_ids = model.generate(
@@ -217,7 +206,6 @@ inputs = processor(
     images=[image_snowman],
     padding=True,
     return_tensors="pt",
-    return_for_text_completion=True,
 ).to(model.device)
 
 # Generate discrete image tokens
@@ -262,12 +250,7 @@ model = ChameleonForConditionalGeneration.from_pretrained(
 prompt = "Can you draw a snowman and explain how to build one?"
 
 # Preprocess the prompt
-inputs = processor(
-    prompt,
-    padding=True,
-    return_tensors="pt",
-    return_for_text_completion=True,
-).to(model.device)
+inputs = processor(prompt, padding=True, return_tensors="pt").to(model.device)
 
 # Generate interleaved text and discrete image tokens
 generate_ids = model.generate(
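After this change, every processor call in the doc drops the return_for_text_completion flag. A minimal end-to-end sketch of the simplified text-completion flow, assuming the facebook/chameleon-7b checkpoint and illustrative bfloat16/CUDA settings (neither is prescribed by this commit):

import requests
import torch
from PIL import Image
from transformers import ChameleonForConditionalGeneration, ChameleonProcessor

# Checkpoint name and dtype/device choices are illustrative, not part of this commit.
model_id = "facebook/chameleon-7b"
processor = ChameleonProcessor.from_pretrained(model_id)
model = ChameleonForConditionalGeneration.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="cuda"
)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = "What do you see in this image?<image>"

# The processor is now called without return_for_text_completion.
inputs = processor(prompt, image, return_tensors="pt").to(model.device, dtype=torch.bfloat16)

# Autoregressively complete the prompt
output = model.generate(**inputs, max_new_tokens=50)
print(processor.decode(output[0], skip_special_tokens=True))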
