Add dim order assert #7561

Open · wants to merge 2 commits into main
5 changes: 5 additions & 0 deletions backends/mediatek/partitioner.py
@@ -44,6 +44,11 @@ def is_node_supported(self, _, node: torch.fx.Node) -> bool:
            return False

        op_type = node.target.__name__

        # Skip until we can handle the dimension order representation
        if op_type == "aten._to_copy.default":
            return False

        if op_type in self._op_types_to_skip or node.name in self._op_names_to_skip:
            print(
                f"[Neuropilot Backend] The {op_type} operator with name '{node.name}' is skipped."
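
For context on why `aten._to_copy.default` is skipped: a memory-format cast in the source model is traced as this op, and it is the usual way a non-default dim order enters the graph. A minimal sketch (not part of this PR; the module and input shape are made up for illustration):

```python
import torch

class CastToChannelsLast(torch.nn.Module):
    def forward(self, x):
        # The memory-format change is captured as aten._to_copy.default,
        # which carries a dim order the Neuropilot backend cannot handle yet.
        return x.to(memory_format=torch.channels_last)

exported = torch.export.export(CastToChannelsLast(), (torch.randn(1, 3, 8, 8),))
print(exported.graph_module.graph)  # expect an aten._to_copy.default node here
```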
21 changes: 21 additions & 0 deletions backends/mediatek/preprocess.py
@@ -22,6 +22,23 @@
SKIP_COMPILE_SPEC_KEYS = {"ImportForever"}


def assert_default_dim_order(edge_graph_module: torch.fx.GraphModule) -> None:
    for node in edge_graph_module.graph.nodes:
        if node.op != "placeholder":
            continue

        # We expect the default dim order for all tensor-like placeholders, i.e. user inputs, buffers, and params
        t = node.meta.get("val", None)
        if t is not None and getattr(t, "dim_order", None) is not None:
            default_dim_order = tuple(range(t.dim()))
            if t.dim_order() != default_dim_order:
                raise RuntimeError(
                    "Neuropilot backend only supports contiguous memory format for inputs. "
                    f"Expecting dim_order: {default_dim_order}, but got "
                    f"{t.dim_order()} for placeholder node {node}."
                )
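
To make the intent of the default-dim-order check concrete, here is a small illustration (not part of the diff) of what `dim_order()` returns for contiguous versus channels_last tensors:

```python
import torch

contig = torch.randn(1, 3, 8, 8)
nhwc = contig.to(memory_format=torch.channels_last)

print(contig.dim_order())  # (0, 1, 2, 3) -> matches tuple(range(dim)), passes the assert
print(nhwc.dim_order())    # (0, 2, 3, 1) -> would raise the RuntimeError above
```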


@final
class NeuropilotBackend(BackendDetails):

@@ -30,6 +47,10 @@ def preprocess(
        cls, edge_program: ExportedProgram, module_compile_spec: List[CompileSpec]
    ) -> PreprocessResult:

        # Make sure all inputs are in the default dim order (contiguous_format / NCHW)
        assert_default_dim_order(edge_program.graph_module)

        name_to_node_mappings = {node.name: node for node in edge_program.graph.nodes}
        input_names = edge_program.graph_signature.user_inputs
        output_names = edge_program.graph_signature.user_outputs
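
End to end, the new guard fires during lowering, before any Neuropilot compilation happens. A hedged sketch of how it would surface; the partitioner class name, its constructor argument, and the channels_last example input are assumptions for illustration, not something this diff defines:

```python
import torch
from torch.export import export
from executorch.exir import to_edge
from executorch.backends.mediatek.partitioner import NeuropilotPartitioner  # assumed import path

class Tiny(torch.nn.Module):
    def forward(self, x):
        return x + 1

# A channels_last example input gives the placeholder a non-default dim order,
# so NeuropilotBackend.preprocess is expected to raise the new RuntimeError
# instead of handing a mismatched layout to the Neuropilot compiler.
nhwc_input = torch.randn(1, 3, 8, 8).to(memory_format=torch.channels_last)
edge = to_edge(export(Tiny(), (nhwc_input,)))
edge = edge.to_backend(NeuropilotPartitioner([]))  # triggers assert_default_dim_order
```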
8 changes: 8 additions & 0 deletions backends/mediatek/runtime/NeuronBackend.cpp
@@ -13,6 +13,7 @@
#include "api/NeuronAdapter.h"

#include "executorch/runtime/core/error.h"
#include "executorch/runtime/core/exec_aten/util/dim_order_util.h"

#include <algorithm>
#include <memory>
@@ -111,6 +112,13 @@ Error NeuronExecuTorchDelegate::execute(
  size_t inputCount = mInputSizes.size(), outputCount = mOutputSizes.size();

  for (int i = 0; i < inputCount; i++) {
    auto tensor_in = args[i]->toTensor();
    ET_CHECK_OR_RETURN_ERROR(
        runtime::is_contiguous_dim_order(
            tensor_in.dim_order().data(), tensor_in.dim()),
        Internal,
        "Expecting default dim_order but got a non-default dim_order tensor "
        "for external input %d",
        i);

    auto data_ptr = args[i]->toTensor().data_ptr();
    auto data_size = args[i]->toTensor().nbytes();
    if (IsCached</*isInput=*/true>(i, data_ptr)) {