Commit 3b75f66
print statement
peytontolbert committed Jan 30, 2024
1 parent 925bc42 commit 3b75f66
Showing 2 changed files with 23 additions and 10 deletions.
7 changes: 4 additions & 3 deletions vision_datasets/functioncallgenerate2.py
@@ -1,5 +1,6 @@
 import torch
 import json
+from accelerate import PartialState
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.generation import GenerationConfig
 import re
@@ -19,11 +20,11 @@
 )
 model = AutoModelForCausalLM.from_pretrained(
     model_name_or_path,
-    device_map=("cuda:0"),
+    device_map=("cuda"),
     torch_dtype=torch.float16,
     trust_remote_code=True,
     use_safetensors=True
 )
 
 # File to store the responses
 functions_file = "functions.json"

@@ -101,7 +102,7 @@ def expand_qa(features):
         ) # Extract everything after "assistant"
     else:
         response = "No response found after 'assistant'."
-
+    print(response)
     return response


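Note on the device_map change above: in from_pretrained, device_map accepts a device string such as "cuda" or "cuda:0" (pin the whole model to one device) or "auto" (let Accelerate spread layers across every visible device). A minimal sketch of the loading pattern used in this file, with a placeholder checkpoint name standing in for the model_name_or_path defined earlier in the script:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder checkpoint; the real model_name_or_path is set elsewhere in the script.
model_name_or_path = "your-org/your-causal-lm"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    device_map="cuda",          # entire model on the current CUDA device
    torch_dtype=torch.float16,  # half precision to reduce GPU memory
    trust_remote_code=True,
    use_safetensors=True,
)
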
26 changes: 19 additions & 7 deletions vision_datasets/functioncallgenerate3.py
@@ -1,5 +1,7 @@
 import torch
 import json
+import torch.distributed as dist
+import torch.multiprocessing as mp
 from transformers import AutoModelForCausalLM, AutoTokenizer
 from transformers.generation import GenerationConfig
 import re
@@ -19,16 +21,19 @@
 )
 model = AutoModelForCausalLM.from_pretrained(
     model_name_or_path,
-    device_map=("auto"),
+    device_map=("cuda:0"),
     torch_dtype=torch.float16,
     trust_remote_code=True,
     use_safetensors=True
 )
 
 # File to store the responses
 functions_file = "functions.json"
+rank = 2
+world_size = 2
 
 
-def expand_qa(features):
+def expand_qa(features, rank, world_size):
+    dist.init_process_group("nccl", rank=rank, world_size=world_size)
     prompt = f"""{features}"""
     system_message = """When presented with features described by a visual language model, synthesize a function call and generate its output. The function call should be structured to capture specific attributes of the image as detailed by the visual description. Start the function call with the <fn_call> tag and then provide the expected output in JSON format.
@@ -81,7 +86,6 @@ def expand_qa(features):
     input_ids = tokenizer(
         prompt_template, return_tensors="pt"
     ).input_ids.cuda()
-    model.to(device)
     outputs = model.generate(
         input_ids,
         temperature=0.7,
@@ -143,9 +147,17 @@ def process_responses(file_path, output_file_path):
             save_response(item)
     return data
 
-
-# Process the responses.json file
-updated_data = process_responses("responses.json", "functions.json")
+def run_inference(rank, world_size):
+    # Process the responses.json file
+    updated_data = process_responses("responses.json", "functions.json")
+
+    print("Data saved in functions.json")
 
 
-print("Data saved in functions.json")
+def main():
+    world_size = 2
+    mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True)
+
+
+if __name__ == "__main__":
+    main()

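Note on the new entry point: mp.spawn launches nprocs worker processes and calls the target with the worker's rank (0 through nprocs - 1) as an implicit first argument, followed by the contents of args, so run_inference(rank, world_size) receives (0, 2) and (1, 2) here. The module-level rank = 2 would fall outside that range if it were ever passed to init_process_group. With the default env:// init method, dist.init_process_group("nccl", ...) also expects MASTER_ADDR and MASTER_PORT in the environment, which this file never sets. A minimal self-contained sketch of the pattern, with assumed single-node address and port values:

import os
import torch.distributed as dist
import torch.multiprocessing as mp

def run_inference(rank, world_size):
    # mp.spawn supplies rank automatically; world_size arrives via args.
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")  # assumed single-node setup
    os.environ.setdefault("MASTER_PORT", "29500")      # assumed free port
    dist.init_process_group("nccl", rank=rank, world_size=world_size)
    # ... per-rank work (e.g. the process_responses call) would go here ...
    dist.destroy_process_group()

def main():
    world_size = 2
    # Spawn world_size processes; each one receives (rank, world_size).
    mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True)

if __name__ == "__main__":
    main()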