
Commit

bugfix
xuzhao9 committed Nov 14, 2024
1 parent 12f6551 commit 0d856f9
Showing 2 changed files with 1 addition and 2 deletions.
2 changes: 1 addition & 1 deletion tritonbench/operators/ragged_attention/hstu.py
@@ -150,7 +150,7 @@ def forward(
             grid = (1216,)
             _ragged_hstu_attn_fwd_persistent[grid](**kwargs)
         else:
-            _RaggedAttentionRelativeBiasFunction.apply(
+            out = _RaggedAttentionRelativeBiasFunction.apply(
                 self.max_seq_len,  # N
                 kwargs["alpha"],
                 q,
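The fix binds the result of `.apply()` to `out`; before this change the tensor returned by the autograd Function was silently discarded, so the caller presumably had no output to return or backpropagate through. A minimal sketch of the pattern, using a hypothetical stand-in Function (the real `_RaggedAttentionRelativeBiasFunction` lives in tritonbench and takes many more arguments):

    import torch

    # Hypothetical stand-in for a custom autograd Function.
    class _Square(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x * x

        @staticmethod
        def backward(ctx, grad_out):
            (x,) = ctx.saved_tensors
            return 2 * x * grad_out

    x = torch.randn(4, requires_grad=True)

    # Bug pattern: the output of .apply() is dropped on the floor.
    _Square.apply(x)

    # Fixed pattern (what this commit does): bind the returned tensor
    # so it can be returned and differentiated.
    out = _Square.apply(x)
    out.sum().backward()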
1 change: 0 additions & 1 deletion tritonbench/operators/ragged_attention/operator.py
@@ -82,7 +82,6 @@ def get_bwd_fn(self, fwd_fn: Callable[..., Any]) -> Callable[..., Any]:
             lambda x: isinstance(x, torch.Tensor),
             o,
         )
-        print(o)
         do = torch.rand_like(o_tensor)
         fn = lambda: o_tensor.backward(do, retain_graph=True)
         return fn
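This hunk drops a leftover debug `print(o)` from the benchmark's backward setup. The surrounding code builds a closure that replays the backward pass: a random upstream gradient is drawn with `torch.rand_like`, and `retain_graph=True` keeps the autograd graph alive so the closure can be timed repeatedly. A runnable sketch under assumed context (`o_tensor` here is a stand-in for the forward output extracted from `o`):

    import torch

    x = torch.randn(8, 8, requires_grad=True)
    o_tensor = (x * x).sum(dim=1)  # stand-in for the operator's forward output

    do = torch.rand_like(o_tensor)  # random upstream gradient
    fn = lambda: o_tensor.backward(do, retain_graph=True)

    fn()  # each call replays backward; retain_graph keeps the graph alive
    fn()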
