From 8294a2f9f3607d4e7c9fc24fb9f666c722823740 Mon Sep 17 00:00:00 2001
From: Shangdi Yu
Date: Thu, 19 Dec 2024 11:11:30 -0800
Subject: [PATCH] Fix torch._inductor.aoti_compile_and_package input (#7400)

Summary:
The inputs to `torch._inductor.aoti_compile_and_package` changed in
https://github.com/pytorch/pytorch/pull/140991. The `args` and `kwargs` no
longer need to be passed in; the API now gets them from
`exported_program.example_inputs`.

Differential Revision: D67436429
---
 .ci/docker/ci_commit_pins/pytorch.txt                       | 2 +-
 examples/models/llama3_2_vision/install_requirements.sh     | 2 +-
 .../llama3_2_vision/text_decoder/test/test_text_decoder.py  | 2 --
 .../vision_encoder/test/test_vision_encoder.py              | 1 -
 extension/llm/modules/test/test_position_embeddings.py      | 1 -
 install_requirements.py                                     | 6 +++---
 6 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/.ci/docker/ci_commit_pins/pytorch.txt b/.ci/docker/ci_commit_pins/pytorch.txt
index d1e1e4843b..c89e497a5d 100644
--- a/.ci/docker/ci_commit_pins/pytorch.txt
+++ b/.ci/docker/ci_commit_pins/pytorch.txt
@@ -1 +1 @@
-19eff28ff3f19b50da46f5a9ff5f4d4d213806fe
+288aa873831057b1eb7d747914ec4fdc76c23a80
diff --git a/examples/models/llama3_2_vision/install_requirements.sh b/examples/models/llama3_2_vision/install_requirements.sh
index de8ed0632d..1b81dd211f 100755
--- a/examples/models/llama3_2_vision/install_requirements.sh
+++ b/examples/models/llama3_2_vision/install_requirements.sh
@@ -5,7 +5,7 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-NIGHTLY_VERSION="dev20241112"
+NIGHTLY_VERSION="dev20241218"
 
 # Install torchtune nightly for model definitions.
 pip install --pre torchtune==0.4.0.${NIGHTLY_VERSION} --extra-index-url https://download.pytorch.org/whl/nightly/cpu --no-cache-dir
diff --git a/examples/models/llama3_2_vision/text_decoder/test/test_text_decoder.py b/examples/models/llama3_2_vision/text_decoder/test/test_text_decoder.py
index 8e678801b8..e6bf2ddd31 100644
--- a/examples/models/llama3_2_vision/text_decoder/test/test_text_decoder.py
+++ b/examples/models/llama3_2_vision/text_decoder/test/test_text_decoder.py
@@ -74,8 +74,6 @@ def test_llama3_2_text_decoder_aoti(self) -> None:
         with tempfile.TemporaryDirectory() as tmpdir:
             path = torch._inductor.aoti_compile_and_package(
                 ep,
-                model.get_example_inputs(),
-                kwargs=model.get_example_kwarg_inputs(),
                 package_path=os.path.join(tmpdir, "text_decoder.pt2"),
             )
             encoder_aoti = torch._inductor.aoti_load_package(path)
diff --git a/examples/models/llama3_2_vision/vision_encoder/test/test_vision_encoder.py b/examples/models/llama3_2_vision/vision_encoder/test/test_vision_encoder.py
index c2f1e77cee..7721350014 100644
--- a/examples/models/llama3_2_vision/vision_encoder/test/test_vision_encoder.py
+++ b/examples/models/llama3_2_vision/vision_encoder/test/test_vision_encoder.py
@@ -36,7 +36,6 @@ def test_flamingo_vision_encoder(self) -> None:
         with tempfile.TemporaryDirectory() as tmpdir:
             path = torch._inductor.aoti_compile_and_package(
                 ep,
-                model.get_example_inputs(),
                 package_path=os.path.join(tmpdir, "vision_encoder.pt2"),
             )
             print(path)
diff --git a/extension/llm/modules/test/test_position_embeddings.py b/extension/llm/modules/test/test_position_embeddings.py
index 039cc798b1..5e92f92df6 100644
--- a/extension/llm/modules/test/test_position_embeddings.py
+++ b/extension/llm/modules/test/test_position_embeddings.py
@@ -177,7 +177,6 @@ def test_tiled_token_positional_embedding_aoti(self):
         with tempfile.TemporaryDirectory() as tmpdir:
             path = torch._inductor.aoti_compile_and_package(
                 tpe_ep,
-                (self.x, self.aspect_ratio),
                 package_path=os.path.join(tmpdir, "tpe.pt2"),
             )
             tpe_aoti = load_package(path)
diff --git a/install_requirements.py b/install_requirements.py
index ace2f34b70..18b5d6f9b9 100644
--- a/install_requirements.py
+++ b/install_requirements.py
@@ -112,7 +112,7 @@ def python_is_compatible():
 # NOTE: If a newly-fetched version of the executorch repo changes the value of
 # NIGHTLY_VERSION, you should re-run this script to install the necessary
 # package versions.
-NIGHTLY_VERSION = "dev20241112"
+NIGHTLY_VERSION = "dev20241218"
 
 # The pip repository that hosts nightly torch packages.
 TORCH_NIGHTLY_URL = "https://download.pytorch.org/whl/nightly/cpu"
@@ -124,7 +124,7 @@ def python_is_compatible():
     # been installed on CI before this step, so pip won't reinstall them
     f"torch==2.6.0.{NIGHTLY_VERSION}" if USE_PYTORCH_NIGHTLY else "torch",
     (
-        f"torchvision==0.20.0.{NIGHTLY_VERSION}"
+        f"torchvision==0.22.0.{NIGHTLY_VERSION}"
         if USE_PYTORCH_NIGHTLY
         else "torchvision"
     ),  # For testing.
@@ -135,7 +135,7 @@
 # TODO: Make each example publish its own requirements.txt
 EXAMPLES_REQUIREMENTS = [
     "timm==1.0.7",
-    f"torchaudio==2.5.0.{NIGHTLY_VERSION}" if USE_PYTORCH_NIGHTLY else "torchaudio",
+    f"torchaudio==2.6.0.{NIGHTLY_VERSION}" if USE_PYTORCH_NIGHTLY else "torchaudio",
     "torchsr==1.0.4",
     "transformers==4.46.1",
 ]
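
For reference, a minimal sketch (not part of the patch) of the call pattern after this change: `aoti_compile_and_package` now reads the example inputs recorded on the ExportedProgram rather than taking `args`/`kwargs` directly. `ToyModel`, `example_args`, and the "toy.pt2" package name are hypothetical stand-ins for the models exercised in the tests above:

import os
import tempfile

import torch


class ToyModel(torch.nn.Module):  # hypothetical model, not from this repo
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.relu(x)


model = ToyModel()
example_args = (torch.randn(2, 8),)

# torch.export.export captures example_args on the ExportedProgram
# (ep.example_inputs), which is where aoti_compile_and_package now
# looks for them.
ep = torch.export.export(model, example_args)

with tempfile.TemporaryDirectory() as tmpdir:
    # No positional example inputs or kwargs= argument anymore.
    path = torch._inductor.aoti_compile_and_package(
        ep,
        package_path=os.path.join(tmpdir, "toy.pt2"),
    )
    # Load the compiled package and run it with the same example inputs.
    compiled = torch._inductor.aoti_load_package(path)
    print(compiled(*example_args))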