Disable donated_buffer for all ops' backward benchmarking (#104)
Summary:
This is still a temporary fix for backward benchmarking. Related discussion: #40

Pull Request resolved: #104

Reviewed By: xuzhao9

Differential Revision: D66911331

Pulled By: FindHao

fbshipit-source-id: 6b3e5188fb6c929d6fe34aaf3a141bafa92c33f3
FindHao authored and facebook-github-bot committed Dec 9, 2024
1 parent 1fb46a3 commit 642ac1e
Showing 3 changed files with 15 additions and 0 deletions.
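
For context, a minimal sketch of the situation this commit works around (not part of the commit; the toy model, shapes, and iteration count are illustrative assumptions): backward benchmarking replays backward() on the same graph many times, which conflicts with functorch's donated-buffer optimization, so the operators below turn it off before calling torch.compile.

# Minimal sketch (assumption: illustrative toy model, not tritonbench code).
import torch
import torch._functorch.config

# Mirror the pattern added in the diffs below: turn off donated buffers before
# compiling, since the backward pass will be replayed for benchmarking.
torch._functorch.config.donated_buffer = False

model = torch.nn.Linear(64, 64)
compiled = torch.compile(model)

x = torch.randn(32, 64, requires_grad=True)
loss = compiled(x).sum()

# Backward benchmarking reruns backward() on the same graph; with donated
# buffers enabled this may be unsafe, because saved buffers can be reused
# (clobbered) by the first backward call.
for _ in range(3):
    loss.backward(retain_graph=True)
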
7 changes: 7 additions & 0 deletions tritonbench/operators/geglu/operator.py
@@ -8,6 +8,7 @@

from tritonbench.utils.triton_op import (
    BenchmarkOperator,
    Mode,
    register_benchmark,
    register_x_val,
)
@@ -59,6 +60,12 @@ def liger_geglu(self, input) -> Callable:

    @register_benchmark()
    def inductor_geglu(self, input) -> Callable:
        # TODO: remove this once we have a better way to handle backward benchmarking
        # We need to run backward multiple times for proper benchmarking,
        # so the donated buffer has to be disabled
        if self.mode == Mode.BWD or self.mode == Mode.FWD_BWD:
            import torch._functorch.config

            torch._functorch.config.donated_buffer = False
        compiled = torch.compile(self.baseline_model)
        return lambda: compiled(input)

1 change: 1 addition & 0 deletions tritonbench/operators/layer_norm/operator.py
@@ -34,6 +34,7 @@ def torch_layer_norm(self, *args):

    @register_benchmark()
    def torch_compile_layer_norm(self, *args):
        # TODO: remove this once we have a better way to handle backward benchmarking
        # We need to run backward multiple times for proper benchmarking,
        # so the donated buffer has to be disabled
        if self.mode == Mode.BWD or self.mode == Mode.FWD_BWD:

7 changes: 7 additions & 0 deletions tritonbench/operators/swiglu/operator.py
@@ -7,6 +7,7 @@

from tritonbench.utils.triton_op import (
    BenchmarkOperator,
    Mode,
    register_benchmark,
    register_x_val,
)
@@ -59,6 +60,12 @@ def liger_swiglu(self, input) -> Callable:

    @register_benchmark()
    def inductor_swiglu(self, input) -> Callable:
        # TODO: remove this once we have a better way to handle backward benchmarking
        # We need to run backward multiple times for proper benchmarking,
        # so the donated buffer has to be disabled
        if self.mode == Mode.BWD or self.mode == Mode.FWD_BWD:
            import torch._functorch.config

            torch._functorch.config.donated_buffer = False
        compiled = torch.compile(self.baseline_op)
        return lambda: compiled(input)
