Skip to content

Commit

Permalink
Add reset dynamo option (#125)
Browse files Browse the repository at this point in the history
Summary:
Fix #90

When we run multiple inputs at one time, torch._dynamo may hit config.cache_size_limit and stop compiling. This PR adds an option to reset dynamo between inputs for an operator.

Pull Request resolved: #125

Reviewed By: xuzhao9

Differential Revision: D67907042

Pulled By: FindHao

fbshipit-source-id: 85c24c42fd57682c735c176b91487bf393e62ac0
  • Loading branch information
FindHao authored and facebook-github-bot committed Jan 7, 2025
1 parent a2f668e commit eeac9ca
Show file tree
Hide file tree
Showing 2 changed files with 5 additions and 0 deletions.
1 change: 1 addition & 0 deletions tritonbench/operators/embedding/operator.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ def __init__(
# they are generated later
self.baseline_op = None
self.liger_op = None
self.reset_dynamo = True

def get_input_iter(self) -> Generator:
for B, T, D in [(32, 512, 768), (8, 2048, 4096)]:
Expand Down
4 changes: 4 additions & 0 deletions tritonbench/utils/triton_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -619,6 +619,8 @@ class BenchmarkOperator(metaclass=PostInitProcessor):
example_inputs: Any = None
use_cuda_graphs: bool = False
is_compute_bound = True
# reset dynamo to avoid errors like https://github.com/pytorch-labs/tritonbench/issues/90
reset_dynamo = False

"""
A base class for adding operators to torch benchmark.
Expand Down Expand Up @@ -743,6 +745,8 @@ def run(
self.example_inputs = self.get_example_inputs()
for input_id in input_id_range:
self.example_inputs = self.get_example_inputs()
if self.reset_dynamo:
torch._dynamo.reset()
x_val = self.get_x_val(self.example_inputs)
if "proton" in self.required_metrics:
proton.activate(self._proton_session_id)
Expand Down

0 comments on commit eeac9ca

Please sign in to comment.