
Commit

Update documentation
liuxinwei committed Jul 11, 2024
1 parent fefab40 commit 198a344
Showing 17 changed files with 934 additions and 890 deletions.
4 changes: 4 additions & 0 deletions .gitignore
@@ -144,3 +144,7 @@ pdm.lock
# temp
.temp/
temp/
*.torchscript
*.pt
*.onnx
*.pth
85 changes: 50 additions & 35 deletions doc/tutorials/pattern/dataflow/rewrite/SqueezeExcitation.ipynb
@@ -81,12 +81,23 @@
"\n",
" def forward(self, input: Tensor) -> Tensor:\n",
" scale = self._scale(input, True)\n",
" return scale * input"
" return scale * input\n",
"\n",
"class M(nn.Module):\n",
" def __init__(self, input_channels: int=16):\n",
" super().__init__()\n",
" self.conv = nn.Conv2d(input_channels, 64, 1, bias=False)\n",
" self.se_layer = SqueezeExcitation(input_channels)\n",
"\n",
" def forward(self, x: Tensor) -> Tensor:\n",
" x = self.se_layer(x)\n",
" x = self.conv(x)\n",
" return x"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
@@ -95,22 +106,22 @@
"data_np = (np.random.randint(0, 256, shape)/255).astype(\"float32\")\n",
"data_torch = torch.from_numpy(data_np)\n",
"\n",
"model = SqueezeExcitation(input_channels=16).eval()\n",
"model = M(input_channels=16).eval()\n",
"scripted_model = torch.jit.trace(model, data_torch).eval()\n",
"shape_list = [(name, shape)]\n",
"mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"fn (%x: Tensor[(1, 16, 64, 48), float32] /* span=aten::adaptive_avg_pool2d_0.x:0:0 */, %aten::_convolution_0.weight: Tensor[(8, 16, 1, 1), float32] /* span=aten::_convolution_0.weight:0:0 */, %aten::_convolution_0.bias: Tensor[(8), float32] /* span=aten::_convolution_0.bias:0:0 */, %aten::_convolution_1.weight: Tensor[(16, 8, 1, 1), float32] /* span=aten::_convolution_1.weight:0:0 */, %aten::_convolution_1.bias: Tensor[(16), float32] /* span=aten::_convolution_1.bias:0:0 */) {\n",
"fn (%x: Tensor[(1, 16, 64, 48), float32] /* span=aten::adaptive_avg_pool2d_0.x:0:0 */, %aten::_convolution_0.weight: Tensor[(8, 16, 1, 1), float32] /* span=aten::_convolution_0.weight:0:0 */, %aten::_convolution_0.bias: Tensor[(8), float32] /* span=aten::_convolution_0.bias:0:0 */, %aten::_convolution_1.weight: Tensor[(16, 8, 1, 1), float32] /* span=aten::_convolution_1.weight:0:0 */, %aten::_convolution_1.bias: Tensor[(16), float32] /* span=aten::_convolution_1.bias:0:0 */, %aten::_convolution_2.weight: Tensor[(64, 16, 1, 1), float32] /* span=aten::_convolution_2.weight:0:0 */) {\n",
" %0 = nn.adaptive_avg_pool2d(%x, output_size=[1, 1]) /* span=aten::adaptive_avg_pool2d_0:0:0 */;\n",
" %1 = nn.conv2d(%0, %aten::_convolution_0.weight, padding=[0, 0, 0, 0], channels=8, kernel_size=[1, 1]) /* span=aten::_convolution_0:0:0 */;\n",
" %2 = nn.bias_add(%1, %aten::_convolution_0.bias) /* span=aten::_convolution_0:0:0 */;\n",
@@ -120,7 +131,8 @@
" %6 = add(%5, 3f /* span=aten::hardsigmoid__0:0:0 */) /* span=aten::hardsigmoid__0:0:0 */;\n",
" %7 = clip(%6, a_min=0f, a_max=6f) /* span=aten::hardsigmoid__0:0:0 */;\n",
" %8 = divide(%7, 6f /* span=aten::hardsigmoid__0:0:0 */) /* span=aten::hardsigmoid__0:0:0 */;\n",
" multiply(%8, %x) /* span=aten::mul_0:0:0 */\n",
" %9 = multiply(%8, %x) /* span=aten::mul_0:0:0 */;\n",
" nn.conv2d(%9, %aten::_convolution_2.weight, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* span=aten::_convolution_2:0:0 */\n",
"}\n"
]
}
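
With the wrapper module, the imported Relay function gains an extra `%aten::_convolution_2.weight` parameter and now ends in the 64-channel `nn.conv2d` rather than the bare `multiply`. The IR printed above carries spans but no inferred types; a hedged sketch of viewing it with shape annotations:

```python
# Hedged sketch: run type inference so the printed IR carries shape/type annotations.
from tvm import relay

typed_mod = relay.transform.InferType()(mod)
print(typed_mod["main"])  # same graph, now with /* ty=Tensor[...] */ annotations
```
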
@@ -131,7 +143,7 @@
},
{
"cell_type": "code",
"execution_count": 30,
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
@@ -143,74 +155,77 @@
" [\n",
" relay.transform.SimplifyInference(),\n",
" relay.transform.FoldConstant(),\n",
" # relay.transform.FoldScaleAxis(),\n",
" relay.transform.CanonicalizeOps(),\n",
" relay.transform.FoldConstant(),\n",
" relay.transform.FoldScaleAxis(),\n",
" # relay.transform.CanonicalizeOps(),\n",
" # relay.transform.FoldConstant(),\n",
" ]\n",
")\n",
"run_mod = deepcopy(mod)\n",
"run_mod[\"main\"] = _bind_params(run_mod[\"main\"], params)\n",
"with tvm.transform.PassContext(opt_level=3):\n",
" run_mod2 = relay.quantize.prerequisite_optimize(deepcopy(mod), params)\n",
" # run_mod2 = relay.quantize.prerequisite_optimize(deepcopy(mod), params)\n",
" run_mod = optimize(run_mod)"
]
},
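
The pass list now enables `relay.transform.FoldScaleAxis()` and drops the previously active passes: because the SE output is consumed by a convolution, the per-channel `multiply` can be pushed forward into the conv weights. A hedged numpy sketch of the identity the pass relies on, for a 1x1 conv:

```python
# Hedged numpy sketch of the identity behind FoldScaleAxis for a 1x1 conv:
# conv(x * s) == conv(x) with s folded into the weight's input-channel axis.
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((1, 16, 4, 4)).astype("float32")   # NCHW activation
w = rng.standard_normal((64, 16, 1, 1)).astype("float32")  # OIHW 1x1 weight
s = rng.standard_normal(16).astype("float32")              # per-channel scale

def conv1x1(x, w):
    # A 1x1 convolution reduces to a contraction over the channel axis.
    return np.einsum("nchw,oc->nohw", x, w[:, :, 0, 0])

y_scaled_input = conv1x1(x * s[None, :, None, None], w)
y_folded_weight = conv1x1(x, w * s[None, :, None, None])
np.testing.assert_allclose(y_scaled_input, y_folded_weight, rtol=1e-4)
```

Here the SE scale is computed from the input at runtime rather than being a constant, so the rewritten IR below multiplies it into the weight tensor on the fly instead of pre-folding it.
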
{
"cell_type": "code",
"execution_count": 33,
"execution_count": 12,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"fn (%x: Tensor[(1, 16, 64, 48), float32] /* ty=Tensor[(1, 16, 64, 48), float32] span=aten::adaptive_avg_pool2d_0.x:0:0 */) -> Tensor[(1, 16, 64, 48), float32] {\n",
"fn (%x: Tensor[(1, 16, 64, 48), float32] /* ty=Tensor[(1, 16, 64, 48), float32] span=aten::adaptive_avg_pool2d_0.x:0:0 */) -> Tensor[(1, 64, 64, 48), float32] {\n",
" %0 = nn.adaptive_avg_pool2d(%x, output_size=[1, 1]) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::adaptive_avg_pool2d_0:0:0 */;\n",
" %1 = nn.conv2d(%0, meta[relay.Constant][0] /* ty=Tensor[(8, 16, 1, 1), float32] */, padding=[0, 0, 0, 0], channels=8, kernel_size=[1, 1]) /* ty=Tensor[(1, 8, 1, 1), float32] span=aten::_convolution_0:0:0 */;\n",
" %2 = add(%1, meta[relay.Constant][1] /* ty=Tensor[(8, 1, 1), float32] */) /* ty=Tensor[(1, 8, 1, 1), float32] */;\n",
" %1 = nn.conv2d(%0, meta[relay.Constant][1] /* ty=Tensor[(8, 16, 1, 1), float32] */, padding=[0, 0, 0, 0], channels=8, kernel_size=[1, 1]) /* ty=Tensor[(1, 8, 1, 1), float32] span=aten::_convolution_0:0:0 */;\n",
" %2 = nn.bias_add(%1, meta[relay.Constant][2] /* ty=Tensor[(8), float32] */) /* ty=Tensor[(1, 8, 1, 1), float32] span=aten::_convolution_0:0:0 */;\n",
" %3 = nn.relu(%2) /* ty=Tensor[(1, 8, 1, 1), float32] span=aten::relu__0:0:0 */;\n",
" %4 = nn.conv2d(%3, meta[relay.Constant][2] /* ty=Tensor[(16, 8, 1, 1), float32] */, padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::_convolution_1:0:0 */;\n",
" %5 = add(%4, meta[relay.Constant][3] /* ty=Tensor[(16, 1, 1), float32] */) /* ty=Tensor[(1, 16, 1, 1), float32] */;\n",
" %4 = nn.conv2d(%3, meta[relay.Constant][3] /* ty=Tensor[(16, 8, 1, 1), float32] */, padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::_convolution_1:0:0 */;\n",
" %5 = nn.bias_add(%4, meta[relay.Constant][4] /* ty=Tensor[(16), float32] */) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::_convolution_1:0:0 */;\n",
" %6 = add(%5, 3f /* ty=float32 span=aten::hardsigmoid__0:0:0 */) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::hardsigmoid__0:0:0 */;\n",
" %7 = clip(%6, a_min=0f, a_max=6f) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::hardsigmoid__0:0:0 */;\n",
" %8 = divide(%7, 6f /* ty=float32 span=aten::hardsigmoid__0:0:0 */) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::hardsigmoid__0:0:0 */;\n",
" multiply(%8, %x) /* ty=Tensor[(1, 16, 64, 48), float32] span=aten::mul_0:0:0 */\n",
"} /* ty=fn (Tensor[(1, 16, 64, 48), float32]) -> Tensor[(1, 16, 64, 48), float32] */\n",
" %9 = squeeze(%8, axis=[0, 2, 3]) /* ty=Tensor[(16), float32] */;\n",
" %10 = expand_dims(%9, axis=1, num_newaxis=2) /* ty=Tensor[(16, 1, 1), float32] */;\n",
" %11 = multiply(meta[relay.Constant][0] /* ty=Tensor[(64, 16, 1, 1), float32] */, %10) /* ty=Tensor[(64, 16, 1, 1), float32] */;\n",
" nn.conv2d(%x, %11, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* ty=Tensor[(1, 64, 64, 48), float32] */\n",
"} /* ty=fn (Tensor[(1, 16, 64, 48), float32]) -> Tensor[(1, 64, 64, 48), float32] */\n",
"\n"
]
}
],
"source": [
"print(run_mod2[\"main\"])"
"print(run_mod[\"main\"])"
]
},
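
In the rewritten `main`, the activation-side `multiply(%8, %x)` is gone: the SE scale is squeezed to shape `(16)`, expanded to `(16, 1, 1)`, multiplied into the `(64, 16, 1, 1)` weight (`%11`), and a single conv then consumes `%x` directly. A hedged end-to-end sketch of checking that the rewrite preserves numerics (reusing `mod`, `params`, `run_mod`, and `data_np` from the cells above):

```python
# Hedged sketch: compile both modules and compare their outputs numerically.
import numpy as np
import tvm
from tvm import relay
from tvm.contrib.graph_executor import GraphModule

def build_and_run(irmod, ps):
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(irmod, target="llvm", params=ps)
    g = GraphModule(lib["default"](tvm.cpu()))
    g.set_input("x", data_np)
    g.run()
    return g.get_output(0).numpy()

ref = build_and_run(mod, params)    # original imported module
out = build_and_run(run_mod, None)  # params already bound by _bind_params
np.testing.assert_allclose(ref, out, rtol=1e-4, atol=1e-5)
```
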
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"fn (%x: Tensor[(1, 16, 64, 48), float32] /* ty=Tensor[(1, 16, 64, 48), float32] span=aten::adaptive_avg_pool2d_0.x:0:0 */) -> Tensor[(1, 16, 64, 48), float32] {\n",
" %0 = nn.adaptive_avg_pool2d(%x, output_size=[1, 1]) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::adaptive_avg_pool2d_0:0:0 */;\n",
" %1 = nn.conv2d(%0, meta[relay.Constant][0] /* ty=Tensor[(8, 16, 1, 1), float32] */, padding=[0, 0, 0, 0], channels=8, kernel_size=[1, 1]) /* ty=Tensor[(1, 8, 1, 1), float32] span=aten::_convolution_0:0:0 */;\n",
" %2 = add(%1, meta[relay.Constant][1] /* ty=Tensor[(8, 1, 1), float32] */) /* ty=Tensor[(1, 8, 1, 1), float32] */;\n",
" %3 = nn.relu(%2) /* ty=Tensor[(1, 8, 1, 1), float32] span=aten::relu__0:0:0 */;\n",
" %4 = nn.conv2d(%3, meta[relay.Constant][2] /* ty=Tensor[(16, 8, 1, 1), float32] */, padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::_convolution_1:0:0 */;\n",
" %5 = add(%4, meta[relay.Constant][3] /* ty=Tensor[(16, 1, 1), float32] */) /* ty=Tensor[(1, 16, 1, 1), float32] */;\n",
" %6 = add(%5, 3f /* ty=float32 span=aten::hardsigmoid__0:0:0 */) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::hardsigmoid__0:0:0 */;\n",
" %7 = clip(%6, a_min=0f, a_max=6f) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::hardsigmoid__0:0:0 */;\n",
" %8 = divide(%7, 6f /* ty=float32 span=aten::hardsigmoid__0:0:0 */) /* ty=Tensor[(1, 16, 1, 1), float32] span=aten::hardsigmoid__0:0:0 */;\n",
" multiply(%8, %x) /* ty=Tensor[(1, 16, 64, 48), float32] span=aten::mul_0:0:0 */\n",
"} /* ty=fn (Tensor[(1, 16, 64, 48), float32]) -> Tensor[(1, 16, 64, 48), float32] */\n",
"\n"
"fn (%x: Tensor[(1, 16, 64, 48), float32] /* span=aten::adaptive_avg_pool2d_0.x:0:0 */, %aten::_convolution_0.weight: Tensor[(8, 16, 1, 1), float32] /* span=aten::_convolution_0.weight:0:0 */, %aten::_convolution_0.bias: Tensor[(8), float32] /* span=aten::_convolution_0.bias:0:0 */, %aten::_convolution_1.weight: Tensor[(16, 8, 1, 1), float32] /* span=aten::_convolution_1.weight:0:0 */, %aten::_convolution_1.bias: Tensor[(16), float32] /* span=aten::_convolution_1.bias:0:0 */, %aten::_convolution_2.weight: Tensor[(64, 16, 1, 1), float32] /* span=aten::_convolution_2.weight:0:0 */) {\n",
" %0 = nn.adaptive_avg_pool2d(%x, output_size=[1, 1]) /* span=aten::adaptive_avg_pool2d_0:0:0 */;\n",
" %1 = nn.conv2d(%0, %aten::_convolution_0.weight, padding=[0, 0, 0, 0], channels=8, kernel_size=[1, 1]) /* span=aten::_convolution_0:0:0 */;\n",
" %2 = nn.bias_add(%1, %aten::_convolution_0.bias) /* span=aten::_convolution_0:0:0 */;\n",
" %3 = nn.relu(%2) /* span=aten::relu__0:0:0 */;\n",
" %4 = nn.conv2d(%3, %aten::_convolution_1.weight, padding=[0, 0, 0, 0], channels=16, kernel_size=[1, 1]) /* span=aten::_convolution_1:0:0 */;\n",
" %5 = nn.bias_add(%4, %aten::_convolution_1.bias) /* span=aten::_convolution_1:0:0 */;\n",
" %6 = add(%5, 3f /* span=aten::hardsigmoid__0:0:0 */) /* span=aten::hardsigmoid__0:0:0 */;\n",
" %7 = clip(%6, a_min=0f, a_max=6f) /* span=aten::hardsigmoid__0:0:0 */;\n",
" %8 = divide(%7, 6f /* span=aten::hardsigmoid__0:0:0 */) /* span=aten::hardsigmoid__0:0:0 */;\n",
" %9 = multiply(%8, %x) /* span=aten::mul_0:0:0 */;\n",
" nn.conv2d(%9, %aten::_convolution_2.weight, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* span=aten::_convolution_2:0:0 */\n",
"}\n"
]
}
],
"source": [
"print(run_mod[\"main\"])"
"print(mod[\"main\"])"
]
},
{
3 changes: 2 additions & 1 deletion tests/book/doc/tests/index.md
@@ -3,6 +3,7 @@
```{toctree}
:glob:
*
FoldScaleAxis/index
mobilenetv3/index
yolo/index
```
File renamed without changes.
