Deploy the Pretrained Model on Adreno™ with tvmc Interface¶
Author: Siva Rama Krishna
This article is a step-by-step tutorial for deploying the pretrained PyTorch ResNet50 model on Adreno™.
In addition, you should have TVM built for Android. See the following instructions on how to build it and set up the RPC environment.
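When targeting a real device, this script also relies on a few environment variables; below is a minimal check sketch (the variable names match the ones used later in this tutorial, the values are whatever your setup provides):
import os
# ANDROID_NDK_HOME locates the cross compiler; TVM_TRACKER_HOST / TVM_TRACKER_PORT point to the RPC tracker.
for var in ("ANDROID_NDK_HOME", "TVM_TRACKER_HOST", "TVM_TRACKER_PORT"):
    print(var, "=", os.environ.get(var, "<not set>"))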
import os
import tvm
import numpy as np
from tvm import relay
from tvm.driver import tvmc
from tvm.driver.tvmc.model import TVMCPackage
from tvm.contrib import utils
Configuration¶
Specify the Adreno target before compiling so that texture-leveraging kernels are generated and you get the full benefit of textures.
Note: This example runs on our x86 server for demonstration. To run it on a real Android device, specify the device's instruction set and set local_demo to False to run this tutorial over RPC.
local_demo = True
# By default the model executes on the CPU target.
# Select between 'llvm', 'opencl' and 'opencl -device=adreno'.
target = "llvm"
# Change target configuration.
# Run `adb shell cat /proc/cpuinfo` to find the arch.
arch = "arm64"
target_host = "llvm -mtriple=%s-linux-android" % arch
# Auto tuning is a compute- and time-intensive task, hence it is disabled for the default run. Please enable it if required.
is_tuning = False
tune_log = "adreno-resnet50.log"
# Set to True to enable the OpenCLML accelerated operator library.
enable_clml = False
cross_compiler = (
    os.getenv("ANDROID_NDK_HOME", "")
    + "/toolchains/llvm/prebuilt/linux-x86_64/bin/aarch64-linux-android28-clang"
)
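For reference, a real-device run would flip the switches above; a minimal sketch of such a configuration, assuming an arm64 Android device reachable over RPC (kept as comments so the demo settings stay in effect):
# local_demo = False
# target = "opencl -device=adreno"
# is_tuning = True    # optionally enable auto tuning below
# enable_clml = True  # optionally offload supported operators to OpenCLML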
Make a PyTorch Resnet50 Model¶
import torch
import torchvision.models as models
# Load the ResNet50 model pre-trained on ImageNet
model = models.resnet50(pretrained=True)
# Set the model to evaluation mode
model.eval()
# Define the input shape
dummy_input = torch.randn(1, 3, 224, 224)
# Trace the model
traced_model = torch.jit.trace(model, dummy_input)
# Save the traced model
model_file_name = "resnet50_traced.pt"
traced_model.save(model_file_name)
/venv/apache-tvm-py3.8/lib/python3.8/site-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.
warnings.warn(
/venv/apache-tvm-py3.8/lib/python3.8/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet50_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet50_Weights.DEFAULT` to get the most up-to-date weights.
warnings.warn(msg)
Downloading: "https://download.pytorch.org/models/resnet50-0676ba61.pth" to /workspace/.cache/torch/hub/checkpoints/resnet50-0676ba61.pth
100%|##########| 97.8M/97.8M [00:00<00:00, 217MB/s]
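Before handing the traced model to TVM, it can help to sanity-check it in PyTorch; a quick check using the traced_model and dummy_input defined above:
with torch.no_grad():
    out = traced_model(dummy_input)
# ResNet50 produces logits for the 1000 ImageNet classes
print(out.shape)  # expected: torch.Size([1, 1000])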
Load Model¶
Convert a model from any framework to a TVM relay module. tvmc.load supports models from any framework (like TensorFlow saved_model, ONNX, TFLite, etc.) and auto-detects the file type.
input_shape = (1, 3, 224, 224) # Batch size, channels, height, width
# Load the TorchScript model with TVMC
tvmc_model = tvmc.load(model_file_name, shape_dict={"input": input_shape}, model_format="pytorch")
print(tvmc_model.mod)
# tvmc_model consists of tvmc_model.mod, which is the relay module, and tvmc_model.params, which are the params of the module.
type List[A] {
Cons(A, List[A]),
Nil,
}
type Option[A] {
Some(A),
None,
}
type Tree[A] {
Rose(A, List[Tree[A]]),
}
type tensor_float16_t {
tensor_nil_float16,
tensor0_float16(float16),
tensor1_float16(Tensor[(?), float16]),
tensor2_float16(Tensor[(?, ?), float16]),
tensor3_float16(Tensor[(?, ?, ?), float16]),
tensor4_float16(Tensor[(?, ?, ?, ?), float16]),
tensor5_float16(Tensor[(?, ?, ?, ?, ?), float16]),
tensor6_float16(Tensor[(?, ?, ?, ?, ?, ?), float16]),
}
type tensor_float32_t {
tensor_nil_float32,
tensor0_float32(float32),
tensor1_float32(Tensor[(?), float32]),
tensor2_float32(Tensor[(?, ?), float32]),
tensor3_float32(Tensor[(?, ?, ?), float32]),
tensor4_float32(Tensor[(?, ?, ?, ?), float32]),
tensor5_float32(Tensor[(?, ?, ?, ?, ?), float32]),
tensor6_float32(Tensor[(?, ?, ?, ?, ?, ?), float32]),
}
type tensor_float64_t {
tensor_nil_float64,
tensor0_float64(float64),
tensor1_float64(Tensor[(?), float64]),
tensor2_float64(Tensor[(?, ?), float64]),
tensor3_float64(Tensor[(?, ?, ?), float64]),
tensor4_float64(Tensor[(?, ?, ?, ?), float64]),
tensor5_float64(Tensor[(?, ?, ?, ?, ?), float64]),
tensor6_float64(Tensor[(?, ?, ?, ?, ?, ?), float64]),
}
type tensor_int16_t {
tensor_nil_int16,
tensor0_int16(int16),
tensor1_int16(Tensor[(?), int16]),
tensor2_int16(Tensor[(?, ?), int16]),
tensor3_int16(Tensor[(?, ?, ?), int16]),
tensor4_int16(Tensor[(?, ?, ?, ?), int16]),
tensor5_int16(Tensor[(?, ?, ?, ?, ?), int16]),
tensor6_int16(Tensor[(?, ?, ?, ?, ?, ?), int16]),
}
type tensor_int32_t {
tensor_nil_int32,
tensor0_int32(int32),
tensor1_int32(Tensor[(?), int32]),
tensor2_int32(Tensor[(?, ?), int32]),
tensor3_int32(Tensor[(?, ?, ?), int32]),
tensor4_int32(Tensor[(?, ?, ?, ?), int32]),
tensor5_int32(Tensor[(?, ?, ?, ?, ?), int32]),
tensor6_int32(Tensor[(?, ?, ?, ?, ?, ?), int32]),
}
type tensor_int64_t {
tensor_nil_int64,
tensor0_int64(int64),
tensor1_int64(Tensor[(?), int64]),
tensor2_int64(Tensor[(?, ?), int64]),
tensor3_int64(Tensor[(?, ?, ?), int64]),
tensor4_int64(Tensor[(?, ?, ?, ?), int64]),
tensor5_int64(Tensor[(?, ?, ?, ?, ?), int64]),
tensor6_int64(Tensor[(?, ?, ?, ?, ?, ?), int64]),
}
type tensor_int8_t {
tensor_nil_int8,
tensor0_int8(int8),
tensor1_int8(Tensor[(?), int8]),
tensor2_int8(Tensor[(?, ?), int8]),
tensor3_int8(Tensor[(?, ?, ?), int8]),
tensor4_int8(Tensor[(?, ?, ?, ?), int8]),
tensor5_int8(Tensor[(?, ?, ?, ?, ?), int8]),
tensor6_int8(Tensor[(?, ?, ?, ?, ?, ?), int8]),
}
type tensor_uint16_t {
tensor_nil_uint16,
tensor0_uint16(uint16),
tensor1_uint16(Tensor[(?), uint16]),
tensor2_uint16(Tensor[(?, ?), uint16]),
tensor3_uint16(Tensor[(?, ?, ?), uint16]),
tensor4_uint16(Tensor[(?, ?, ?, ?), uint16]),
tensor5_uint16(Tensor[(?, ?, ?, ?, ?), uint16]),
tensor6_uint16(Tensor[(?, ?, ?, ?, ?, ?), uint16]),
}
type tensor_uint8_t {
tensor_nil_uint8,
tensor0_uint8(uint8),
tensor1_uint8(Tensor[(?), uint8]),
tensor2_uint8(Tensor[(?, ?), uint8]),
tensor3_uint8(Tensor[(?, ?, ?), uint8]),
tensor4_uint8(Tensor[(?, ?, ?, ?), uint8]),
tensor5_uint8(Tensor[(?, ?, ?, ?, ?), uint8]),
tensor6_uint8(Tensor[(?, ?, ?, ?, ?, ?), uint8]),
}
def @main(%input: Tensor[(1, 3, 224, 224), float32] /* span=aten::_convolution_0.input:0:0 */, %aten::_convolution_0.weight: Tensor[(64, 3, 7, 7), float32] /* span=aten::_convolution_0.weight:0:0 */, %aten::batch_norm_0.weight: Tensor[(64), float32] /* span=aten::batch_norm_0.weight:0:0 */, %aten::batch_norm_0.bias: Tensor[(64), float32] /* span=aten::batch_norm_0.bias:0:0 */, %aten::batch_norm_0.running_mean: Tensor[(64), float32] /* span=aten::batch_norm_0.running_mean:0:0 */, %aten::batch_norm_0.running_var: Tensor[(64), float32] /* span=aten::batch_norm_0.running_var:0:0 */, %aten::_convolution_1.weight: Tensor[(64, 64, 1, 1), float32] /* span=aten::_convolution_1.weight:0:0 */, %aten::batch_norm_1.weight: Tensor[(64), float32] /* span=aten::batch_norm_1.weight:0:0 */, %aten::batch_norm_1.bias: Tensor[(64), float32] /* span=aten::batch_norm_1.bias:0:0 */, %aten::batch_norm_1.running_mean: Tensor[(64), float32] /* span=aten::batch_norm_1.running_mean:0:0 */, %aten::batch_norm_1.running_var: Tensor[(64), float32] /* span=aten::batch_norm_1.running_var:0:0 */, %aten::_convolution_2.weight: Tensor[(64, 64, 3, 3), float32] /* span=aten::_convolution_2.weight:0:0 */, %aten::batch_norm_2.weight: Tensor[(64), float32] /* span=aten::batch_norm_2.weight:0:0 */, %aten::batch_norm_2.bias: Tensor[(64), float32] /* span=aten::batch_norm_2.bias:0:0 */, %aten::batch_norm_2.running_mean: Tensor[(64), float32] /* span=aten::batch_norm_2.running_mean:0:0 */, %aten::batch_norm_2.running_var: Tensor[(64), float32] /* span=aten::batch_norm_2.running_var:0:0 */, %aten::_convolution_3.weight: Tensor[(256, 64, 1, 1), float32] /* span=aten::_convolution_3.weight:0:0 */, %aten::batch_norm_3.weight: Tensor[(256), float32] /* span=aten::batch_norm_3.weight:0:0 */, %aten::batch_norm_3.bias: Tensor[(256), float32] /* span=aten::batch_norm_3.bias:0:0 */, %aten::batch_norm_3.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_3.running_mean:0:0 */, %aten::batch_norm_3.running_var: Tensor[(256), float32] /* span=aten::batch_norm_3.running_var:0:0 */, %aten::_convolution_4.weight: Tensor[(256, 64, 1, 1), float32] /* span=aten::_convolution_4.weight:0:0 */, %aten::batch_norm_4.weight: Tensor[(256), float32] /* span=aten::batch_norm_4.weight:0:0 */, %aten::batch_norm_4.bias: Tensor[(256), float32] /* span=aten::batch_norm_4.bias:0:0 */, %aten::batch_norm_4.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_4.running_mean:0:0 */, %aten::batch_norm_4.running_var: Tensor[(256), float32] /* span=aten::batch_norm_4.running_var:0:0 */, %aten::_convolution_5.weight: Tensor[(64, 256, 1, 1), float32] /* span=aten::_convolution_5.weight:0:0 */, %aten::batch_norm_5.weight: Tensor[(64), float32] /* span=aten::batch_norm_5.weight:0:0 */, %aten::batch_norm_5.bias: Tensor[(64), float32] /* span=aten::batch_norm_5.bias:0:0 */, %aten::batch_norm_5.running_mean: Tensor[(64), float32] /* span=aten::batch_norm_5.running_mean:0:0 */, %aten::batch_norm_5.running_var: Tensor[(64), float32] /* span=aten::batch_norm_5.running_var:0:0 */, %aten::_convolution_6.weight: Tensor[(64, 64, 3, 3), float32] /* span=aten::_convolution_6.weight:0:0 */, %aten::batch_norm_6.weight: Tensor[(64), float32] /* span=aten::batch_norm_6.weight:0:0 */, %aten::batch_norm_6.bias: Tensor[(64), float32] /* span=aten::batch_norm_6.bias:0:0 */, %aten::batch_norm_6.running_mean: Tensor[(64), float32] /* span=aten::batch_norm_6.running_mean:0:0 */, %aten::batch_norm_6.running_var: Tensor[(64), float32] /* span=aten::batch_norm_6.running_var:0:0 */, 
%aten::_convolution_7.weight: Tensor[(256, 64, 1, 1), float32] /* span=aten::_convolution_7.weight:0:0 */, %aten::batch_norm_7.weight: Tensor[(256), float32] /* span=aten::batch_norm_7.weight:0:0 */, %aten::batch_norm_7.bias: Tensor[(256), float32] /* span=aten::batch_norm_7.bias:0:0 */, %aten::batch_norm_7.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_7.running_mean:0:0 */, %aten::batch_norm_7.running_var: Tensor[(256), float32] /* span=aten::batch_norm_7.running_var:0:0 */, %aten::_convolution_8.weight: Tensor[(64, 256, 1, 1), float32] /* span=aten::_convolution_8.weight:0:0 */, %aten::batch_norm_8.weight: Tensor[(64), float32] /* span=aten::batch_norm_8.weight:0:0 */, %aten::batch_norm_8.bias: Tensor[(64), float32] /* span=aten::batch_norm_8.bias:0:0 */, %aten::batch_norm_8.running_mean: Tensor[(64), float32] /* span=aten::batch_norm_8.running_mean:0:0 */, %aten::batch_norm_8.running_var: Tensor[(64), float32] /* span=aten::batch_norm_8.running_var:0:0 */, %aten::_convolution_9.weight: Tensor[(64, 64, 3, 3), float32] /* span=aten::_convolution_9.weight:0:0 */, %aten::batch_norm_9.weight: Tensor[(64), float32] /* span=aten::batch_norm_9.weight:0:0 */, %aten::batch_norm_9.bias: Tensor[(64), float32] /* span=aten::batch_norm_9.bias:0:0 */, %aten::batch_norm_9.running_mean: Tensor[(64), float32] /* span=aten::batch_norm_9.running_mean:0:0 */, %aten::batch_norm_9.running_var: Tensor[(64), float32] /* span=aten::batch_norm_9.running_var:0:0 */, %aten::_convolution_10.weight: Tensor[(256, 64, 1, 1), float32] /* span=aten::_convolution_10.weight:0:0 */, %aten::batch_norm_10.weight: Tensor[(256), float32] /* span=aten::batch_norm_10.weight:0:0 */, %aten::batch_norm_10.bias: Tensor[(256), float32] /* span=aten::batch_norm_10.bias:0:0 */, %aten::batch_norm_10.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_10.running_mean:0:0 */, %aten::batch_norm_10.running_var: Tensor[(256), float32] /* span=aten::batch_norm_10.running_var:0:0 */, %aten::_convolution_11.weight: Tensor[(128, 256, 1, 1), float32] /* span=aten::_convolution_11.weight:0:0 */, %aten::batch_norm_11.weight: Tensor[(128), float32] /* span=aten::batch_norm_11.weight:0:0 */, %aten::batch_norm_11.bias: Tensor[(128), float32] /* span=aten::batch_norm_11.bias:0:0 */, %aten::batch_norm_11.running_mean: Tensor[(128), float32] /* span=aten::batch_norm_11.running_mean:0:0 */, %aten::batch_norm_11.running_var: Tensor[(128), float32] /* span=aten::batch_norm_11.running_var:0:0 */, %aten::_convolution_12.weight: Tensor[(128, 128, 3, 3), float32] /* span=aten::_convolution_12.weight:0:0 */, %aten::batch_norm_12.weight: Tensor[(128), float32] /* span=aten::batch_norm_12.weight:0:0 */, %aten::batch_norm_12.bias: Tensor[(128), float32] /* span=aten::batch_norm_12.bias:0:0 */, %aten::batch_norm_12.running_mean: Tensor[(128), float32] /* span=aten::batch_norm_12.running_mean:0:0 */, %aten::batch_norm_12.running_var: Tensor[(128), float32] /* span=aten::batch_norm_12.running_var:0:0 */, %aten::_convolution_13.weight: Tensor[(512, 128, 1, 1), float32] /* span=aten::_convolution_13.weight:0:0 */, %aten::batch_norm_13.weight: Tensor[(512), float32] /* span=aten::batch_norm_13.weight:0:0 */, %aten::batch_norm_13.bias: Tensor[(512), float32] /* span=aten::batch_norm_13.bias:0:0 */, %aten::batch_norm_13.running_mean: Tensor[(512), float32] /* span=aten::batch_norm_13.running_mean:0:0 */, %aten::batch_norm_13.running_var: Tensor[(512), float32] /* span=aten::batch_norm_13.running_var:0:0 */, %aten::_convolution_14.weight: 
Tensor[(512, 256, 1, 1), float32] /* span=aten::_convolution_14.weight:0:0 */, %aten::batch_norm_14.weight: Tensor[(512), float32] /* span=aten::batch_norm_14.weight:0:0 */, %aten::batch_norm_14.bias: Tensor[(512), float32] /* span=aten::batch_norm_14.bias:0:0 */, %aten::batch_norm_14.running_mean: Tensor[(512), float32] /* span=aten::batch_norm_14.running_mean:0:0 */, %aten::batch_norm_14.running_var: Tensor[(512), float32] /* span=aten::batch_norm_14.running_var:0:0 */, %aten::_convolution_15.weight: Tensor[(128, 512, 1, 1), float32] /* span=aten::_convolution_15.weight:0:0 */, %aten::batch_norm_15.weight: Tensor[(128), float32] /* span=aten::batch_norm_15.weight:0:0 */, %aten::batch_norm_15.bias: Tensor[(128), float32] /* span=aten::batch_norm_15.bias:0:0 */, %aten::batch_norm_15.running_mean: Tensor[(128), float32] /* span=aten::batch_norm_15.running_mean:0:0 */, %aten::batch_norm_15.running_var: Tensor[(128), float32] /* span=aten::batch_norm_15.running_var:0:0 */, %aten::_convolution_16.weight: Tensor[(128, 128, 3, 3), float32] /* span=aten::_convolution_16.weight:0:0 */, %aten::batch_norm_16.weight: Tensor[(128), float32] /* span=aten::batch_norm_16.weight:0:0 */, %aten::batch_norm_16.bias: Tensor[(128), float32] /* span=aten::batch_norm_16.bias:0:0 */, %aten::batch_norm_16.running_mean: Tensor[(128), float32] /* span=aten::batch_norm_16.running_mean:0:0 */, %aten::batch_norm_16.running_var: Tensor[(128), float32] /* span=aten::batch_norm_16.running_var:0:0 */, %aten::_convolution_17.weight: Tensor[(512, 128, 1, 1), float32] /* span=aten::_convolution_17.weight:0:0 */, %aten::batch_norm_17.weight: Tensor[(512), float32] /* span=aten::batch_norm_17.weight:0:0 */, %aten::batch_norm_17.bias: Tensor[(512), float32] /* span=aten::batch_norm_17.bias:0:0 */, %aten::batch_norm_17.running_mean: Tensor[(512), float32] /* span=aten::batch_norm_17.running_mean:0:0 */, %aten::batch_norm_17.running_var: Tensor[(512), float32] /* span=aten::batch_norm_17.running_var:0:0 */, %aten::_convolution_18.weight: Tensor[(128, 512, 1, 1), float32] /* span=aten::_convolution_18.weight:0:0 */, %aten::batch_norm_18.weight: Tensor[(128), float32] /* span=aten::batch_norm_18.weight:0:0 */, %aten::batch_norm_18.bias: Tensor[(128), float32] /* span=aten::batch_norm_18.bias:0:0 */, %aten::batch_norm_18.running_mean: Tensor[(128), float32] /* span=aten::batch_norm_18.running_mean:0:0 */, %aten::batch_norm_18.running_var: Tensor[(128), float32] /* span=aten::batch_norm_18.running_var:0:0 */, %aten::_convolution_19.weight: Tensor[(128, 128, 3, 3), float32] /* span=aten::_convolution_19.weight:0:0 */, %aten::batch_norm_19.weight: Tensor[(128), float32] /* span=aten::batch_norm_19.weight:0:0 */, %aten::batch_norm_19.bias: Tensor[(128), float32] /* span=aten::batch_norm_19.bias:0:0 */, %aten::batch_norm_19.running_mean: Tensor[(128), float32] /* span=aten::batch_norm_19.running_mean:0:0 */, %aten::batch_norm_19.running_var: Tensor[(128), float32] /* span=aten::batch_norm_19.running_var:0:0 */, %aten::_convolution_20.weight: Tensor[(512, 128, 1, 1), float32] /* span=aten::_convolution_20.weight:0:0 */, %aten::batch_norm_20.weight: Tensor[(512), float32] /* span=aten::batch_norm_20.weight:0:0 */, %aten::batch_norm_20.bias: Tensor[(512), float32] /* span=aten::batch_norm_20.bias:0:0 */, %aten::batch_norm_20.running_mean: Tensor[(512), float32] /* span=aten::batch_norm_20.running_mean:0:0 */, %aten::batch_norm_20.running_var: Tensor[(512), float32] /* span=aten::batch_norm_20.running_var:0:0 */, 
%aten::_convolution_21.weight: Tensor[(128, 512, 1, 1), float32] /* span=aten::_convolution_21.weight:0:0 */, %aten::batch_norm_21.weight: Tensor[(128), float32] /* span=aten::batch_norm_21.weight:0:0 */, %aten::batch_norm_21.bias: Tensor[(128), float32] /* span=aten::batch_norm_21.bias:0:0 */, %aten::batch_norm_21.running_mean: Tensor[(128), float32] /* span=aten::batch_norm_21.running_mean:0:0 */, %aten::batch_norm_21.running_var: Tensor[(128), float32] /* span=aten::batch_norm_21.running_var:0:0 */, %aten::_convolution_22.weight: Tensor[(128, 128, 3, 3), float32] /* span=aten::_convolution_22.weight:0:0 */, %aten::batch_norm_22.weight: Tensor[(128), float32] /* span=aten::batch_norm_22.weight:0:0 */, %aten::batch_norm_22.bias: Tensor[(128), float32] /* span=aten::batch_norm_22.bias:0:0 */, %aten::batch_norm_22.running_mean: Tensor[(128), float32] /* span=aten::batch_norm_22.running_mean:0:0 */, %aten::batch_norm_22.running_var: Tensor[(128), float32] /* span=aten::batch_norm_22.running_var:0:0 */, %aten::_convolution_23.weight: Tensor[(512, 128, 1, 1), float32] /* span=aten::_convolution_23.weight:0:0 */, %aten::batch_norm_23.weight: Tensor[(512), float32] /* span=aten::batch_norm_23.weight:0:0 */, %aten::batch_norm_23.bias: Tensor[(512), float32] /* span=aten::batch_norm_23.bias:0:0 */, %aten::batch_norm_23.running_mean: Tensor[(512), float32] /* span=aten::batch_norm_23.running_mean:0:0 */, %aten::batch_norm_23.running_var: Tensor[(512), float32] /* span=aten::batch_norm_23.running_var:0:0 */, %aten::_convolution_24.weight: Tensor[(256, 512, 1, 1), float32] /* span=aten::_convolution_24.weight:0:0 */, %aten::batch_norm_24.weight: Tensor[(256), float32] /* span=aten::batch_norm_24.weight:0:0 */, %aten::batch_norm_24.bias: Tensor[(256), float32] /* span=aten::batch_norm_24.bias:0:0 */, %aten::batch_norm_24.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_24.running_mean:0:0 */, %aten::batch_norm_24.running_var: Tensor[(256), float32] /* span=aten::batch_norm_24.running_var:0:0 */, %aten::_convolution_25.weight: Tensor[(256, 256, 3, 3), float32] /* span=aten::_convolution_25.weight:0:0 */, %aten::batch_norm_25.weight: Tensor[(256), float32] /* span=aten::batch_norm_25.weight:0:0 */, %aten::batch_norm_25.bias: Tensor[(256), float32] /* span=aten::batch_norm_25.bias:0:0 */, %aten::batch_norm_25.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_25.running_mean:0:0 */, %aten::batch_norm_25.running_var: Tensor[(256), float32] /* span=aten::batch_norm_25.running_var:0:0 */, %aten::_convolution_26.weight: Tensor[(1024, 256, 1, 1), float32] /* span=aten::_convolution_26.weight:0:0 */, %aten::batch_norm_26.weight: Tensor[(1024), float32] /* span=aten::batch_norm_26.weight:0:0 */, %aten::batch_norm_26.bias: Tensor[(1024), float32] /* span=aten::batch_norm_26.bias:0:0 */, %aten::batch_norm_26.running_mean: Tensor[(1024), float32] /* span=aten::batch_norm_26.running_mean:0:0 */, %aten::batch_norm_26.running_var: Tensor[(1024), float32] /* span=aten::batch_norm_26.running_var:0:0 */, %aten::_convolution_27.weight: Tensor[(1024, 512, 1, 1), float32] /* span=aten::_convolution_27.weight:0:0 */, %aten::batch_norm_27.weight: Tensor[(1024), float32] /* span=aten::batch_norm_27.weight:0:0 */, %aten::batch_norm_27.bias: Tensor[(1024), float32] /* span=aten::batch_norm_27.bias:0:0 */, %aten::batch_norm_27.running_mean: Tensor[(1024), float32] /* span=aten::batch_norm_27.running_mean:0:0 */, %aten::batch_norm_27.running_var: Tensor[(1024), float32] /* 
span=aten::batch_norm_27.running_var:0:0 */, %aten::_convolution_28.weight: Tensor[(256, 1024, 1, 1), float32] /* span=aten::_convolution_28.weight:0:0 */, %aten::batch_norm_28.weight: Tensor[(256), float32] /* span=aten::batch_norm_28.weight:0:0 */, %aten::batch_norm_28.bias: Tensor[(256), float32] /* span=aten::batch_norm_28.bias:0:0 */, %aten::batch_norm_28.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_28.running_mean:0:0 */, %aten::batch_norm_28.running_var: Tensor[(256), float32] /* span=aten::batch_norm_28.running_var:0:0 */, %aten::_convolution_29.weight: Tensor[(256, 256, 3, 3), float32] /* span=aten::_convolution_29.weight:0:0 */, %aten::batch_norm_29.weight: Tensor[(256), float32] /* span=aten::batch_norm_29.weight:0:0 */, %aten::batch_norm_29.bias: Tensor[(256), float32] /* span=aten::batch_norm_29.bias:0:0 */, %aten::batch_norm_29.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_29.running_mean:0:0 */, %aten::batch_norm_29.running_var: Tensor[(256), float32] /* span=aten::batch_norm_29.running_var:0:0 */, %aten::_convolution_30.weight: Tensor[(1024, 256, 1, 1), float32] /* span=aten::_convolution_30.weight:0:0 */, %aten::batch_norm_30.weight: Tensor[(1024), float32] /* span=aten::batch_norm_30.weight:0:0 */, %aten::batch_norm_30.bias: Tensor[(1024), float32] /* span=aten::batch_norm_30.bias:0:0 */, %aten::batch_norm_30.running_mean: Tensor[(1024), float32] /* span=aten::batch_norm_30.running_mean:0:0 */, %aten::batch_norm_30.running_var: Tensor[(1024), float32] /* span=aten::batch_norm_30.running_var:0:0 */, %aten::_convolution_31.weight: Tensor[(256, 1024, 1, 1), float32] /* span=aten::_convolution_31.weight:0:0 */, %aten::batch_norm_31.weight: Tensor[(256), float32] /* span=aten::batch_norm_31.weight:0:0 */, %aten::batch_norm_31.bias: Tensor[(256), float32] /* span=aten::batch_norm_31.bias:0:0 */, %aten::batch_norm_31.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_31.running_mean:0:0 */, %aten::batch_norm_31.running_var: Tensor[(256), float32] /* span=aten::batch_norm_31.running_var:0:0 */, %aten::_convolution_32.weight: Tensor[(256, 256, 3, 3), float32] /* span=aten::_convolution_32.weight:0:0 */, %aten::batch_norm_32.weight: Tensor[(256), float32] /* span=aten::batch_norm_32.weight:0:0 */, %aten::batch_norm_32.bias: Tensor[(256), float32] /* span=aten::batch_norm_32.bias:0:0 */, %aten::batch_norm_32.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_32.running_mean:0:0 */, %aten::batch_norm_32.running_var: Tensor[(256), float32] /* span=aten::batch_norm_32.running_var:0:0 */, %aten::_convolution_33.weight: Tensor[(1024, 256, 1, 1), float32] /* span=aten::_convolution_33.weight:0:0 */, %aten::batch_norm_33.weight: Tensor[(1024), float32] /* span=aten::batch_norm_33.weight:0:0 */, %aten::batch_norm_33.bias: Tensor[(1024), float32] /* span=aten::batch_norm_33.bias:0:0 */, %aten::batch_norm_33.running_mean: Tensor[(1024), float32] /* span=aten::batch_norm_33.running_mean:0:0 */, %aten::batch_norm_33.running_var: Tensor[(1024), float32] /* span=aten::batch_norm_33.running_var:0:0 */, %aten::_convolution_34.weight: Tensor[(256, 1024, 1, 1), float32] /* span=aten::_convolution_34.weight:0:0 */, %aten::batch_norm_34.weight: Tensor[(256), float32] /* span=aten::batch_norm_34.weight:0:0 */, %aten::batch_norm_34.bias: Tensor[(256), float32] /* span=aten::batch_norm_34.bias:0:0 */, %aten::batch_norm_34.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_34.running_mean:0:0 */, %aten::batch_norm_34.running_var: 
Tensor[(256), float32] /* span=aten::batch_norm_34.running_var:0:0 */, %aten::_convolution_35.weight: Tensor[(256, 256, 3, 3), float32] /* span=aten::_convolution_35.weight:0:0 */, %aten::batch_norm_35.weight: Tensor[(256), float32] /* span=aten::batch_norm_35.weight:0:0 */, %aten::batch_norm_35.bias: Tensor[(256), float32] /* span=aten::batch_norm_35.bias:0:0 */, %aten::batch_norm_35.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_35.running_mean:0:0 */, %aten::batch_norm_35.running_var: Tensor[(256), float32] /* span=aten::batch_norm_35.running_var:0:0 */, %aten::_convolution_36.weight: Tensor[(1024, 256, 1, 1), float32] /* span=aten::_convolution_36.weight:0:0 */, %aten::batch_norm_36.weight: Tensor[(1024), float32] /* span=aten::batch_norm_36.weight:0:0 */, %aten::batch_norm_36.bias: Tensor[(1024), float32] /* span=aten::batch_norm_36.bias:0:0 */, %aten::batch_norm_36.running_mean: Tensor[(1024), float32] /* span=aten::batch_norm_36.running_mean:0:0 */, %aten::batch_norm_36.running_var: Tensor[(1024), float32] /* span=aten::batch_norm_36.running_var:0:0 */, %aten::_convolution_37.weight: Tensor[(256, 1024, 1, 1), float32] /* span=aten::_convolution_37.weight:0:0 */, %aten::batch_norm_37.weight: Tensor[(256), float32] /* span=aten::batch_norm_37.weight:0:0 */, %aten::batch_norm_37.bias: Tensor[(256), float32] /* span=aten::batch_norm_37.bias:0:0 */, %aten::batch_norm_37.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_37.running_mean:0:0 */, %aten::batch_norm_37.running_var: Tensor[(256), float32] /* span=aten::batch_norm_37.running_var:0:0 */, %aten::_convolution_38.weight: Tensor[(256, 256, 3, 3), float32] /* span=aten::_convolution_38.weight:0:0 */, %aten::batch_norm_38.weight: Tensor[(256), float32] /* span=aten::batch_norm_38.weight:0:0 */, %aten::batch_norm_38.bias: Tensor[(256), float32] /* span=aten::batch_norm_38.bias:0:0 */, %aten::batch_norm_38.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_38.running_mean:0:0 */, %aten::batch_norm_38.running_var: Tensor[(256), float32] /* span=aten::batch_norm_38.running_var:0:0 */, %aten::_convolution_39.weight: Tensor[(1024, 256, 1, 1), float32] /* span=aten::_convolution_39.weight:0:0 */, %aten::batch_norm_39.weight: Tensor[(1024), float32] /* span=aten::batch_norm_39.weight:0:0 */, %aten::batch_norm_39.bias: Tensor[(1024), float32] /* span=aten::batch_norm_39.bias:0:0 */, %aten::batch_norm_39.running_mean: Tensor[(1024), float32] /* span=aten::batch_norm_39.running_mean:0:0 */, %aten::batch_norm_39.running_var: Tensor[(1024), float32] /* span=aten::batch_norm_39.running_var:0:0 */, %aten::_convolution_40.weight: Tensor[(256, 1024, 1, 1), float32] /* span=aten::_convolution_40.weight:0:0 */, %aten::batch_norm_40.weight: Tensor[(256), float32] /* span=aten::batch_norm_40.weight:0:0 */, %aten::batch_norm_40.bias: Tensor[(256), float32] /* span=aten::batch_norm_40.bias:0:0 */, %aten::batch_norm_40.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_40.running_mean:0:0 */, %aten::batch_norm_40.running_var: Tensor[(256), float32] /* span=aten::batch_norm_40.running_var:0:0 */, %aten::_convolution_41.weight: Tensor[(256, 256, 3, 3), float32] /* span=aten::_convolution_41.weight:0:0 */, %aten::batch_norm_41.weight: Tensor[(256), float32] /* span=aten::batch_norm_41.weight:0:0 */, %aten::batch_norm_41.bias: Tensor[(256), float32] /* span=aten::batch_norm_41.bias:0:0 */, %aten::batch_norm_41.running_mean: Tensor[(256), float32] /* span=aten::batch_norm_41.running_mean:0:0 */, 
%aten::batch_norm_41.running_var: Tensor[(256), float32] /* span=aten::batch_norm_41.running_var:0:0 */, %aten::_convolution_42.weight: Tensor[(1024, 256, 1, 1), float32] /* span=aten::_convolution_42.weight:0:0 */, %aten::batch_norm_42.weight: Tensor[(1024), float32] /* span=aten::batch_norm_42.weight:0:0 */, %aten::batch_norm_42.bias: Tensor[(1024), float32] /* span=aten::batch_norm_42.bias:0:0 */, %aten::batch_norm_42.running_mean: Tensor[(1024), float32] /* span=aten::batch_norm_42.running_mean:0:0 */, %aten::batch_norm_42.running_var: Tensor[(1024), float32] /* span=aten::batch_norm_42.running_var:0:0 */, %aten::_convolution_43.weight: Tensor[(512, 1024, 1, 1), float32] /* span=aten::_convolution_43.weight:0:0 */, %aten::batch_norm_43.weight: Tensor[(512), float32] /* span=aten::batch_norm_43.weight:0:0 */, %aten::batch_norm_43.bias: Tensor[(512), float32] /* span=aten::batch_norm_43.bias:0:0 */, %aten::batch_norm_43.running_mean: Tensor[(512), float32] /* span=aten::batch_norm_43.running_mean:0:0 */, %aten::batch_norm_43.running_var: Tensor[(512), float32] /* span=aten::batch_norm_43.running_var:0:0 */, %aten::_convolution_44.weight: Tensor[(512, 512, 3, 3), float32] /* span=aten::_convolution_44.weight:0:0 */, %aten::batch_norm_44.weight: Tensor[(512), float32] /* span=aten::batch_norm_44.weight:0:0 */, %aten::batch_norm_44.bias: Tensor[(512), float32] /* span=aten::batch_norm_44.bias:0:0 */, %aten::batch_norm_44.running_mean: Tensor[(512), float32] /* span=aten::batch_norm_44.running_mean:0:0 */, %aten::batch_norm_44.running_var: Tensor[(512), float32] /* span=aten::batch_norm_44.running_var:0:0 */, %aten::_convolution_45.weight: Tensor[(2048, 512, 1, 1), float32] /* span=aten::_convolution_45.weight:0:0 */, %aten::batch_norm_45.weight: Tensor[(2048), float32] /* span=aten::batch_norm_45.weight:0:0 */, %aten::batch_norm_45.bias: Tensor[(2048), float32] /* span=aten::batch_norm_45.bias:0:0 */, %aten::batch_norm_45.running_mean: Tensor[(2048), float32] /* span=aten::batch_norm_45.running_mean:0:0 */, %aten::batch_norm_45.running_var: Tensor[(2048), float32] /* span=aten::batch_norm_45.running_var:0:0 */, %aten::_convolution_46.weight: Tensor[(2048, 1024, 1, 1), float32] /* span=aten::_convolution_46.weight:0:0 */, %aten::batch_norm_46.weight: Tensor[(2048), float32] /* span=aten::batch_norm_46.weight:0:0 */, %aten::batch_norm_46.bias: Tensor[(2048), float32] /* span=aten::batch_norm_46.bias:0:0 */, %aten::batch_norm_46.running_mean: Tensor[(2048), float32] /* span=aten::batch_norm_46.running_mean:0:0 */, %aten::batch_norm_46.running_var: Tensor[(2048), float32] /* span=aten::batch_norm_46.running_var:0:0 */, %aten::_convolution_47.weight: Tensor[(512, 2048, 1, 1), float32] /* span=aten::_convolution_47.weight:0:0 */, %aten::batch_norm_47.weight: Tensor[(512), float32] /* span=aten::batch_norm_47.weight:0:0 */, %aten::batch_norm_47.bias: Tensor[(512), float32] /* span=aten::batch_norm_47.bias:0:0 */, %aten::batch_norm_47.running_mean: Tensor[(512), float32] /* span=aten::batch_norm_47.running_mean:0:0 */, %aten::batch_norm_47.running_var: Tensor[(512), float32] /* span=aten::batch_norm_47.running_var:0:0 */, %aten::_convolution_48.weight: Tensor[(512, 512, 3, 3), float32] /* span=aten::_convolution_48.weight:0:0 */, %aten::batch_norm_48.weight: Tensor[(512), float32] /* span=aten::batch_norm_48.weight:0:0 */, %aten::batch_norm_48.bias: Tensor[(512), float32] /* span=aten::batch_norm_48.bias:0:0 */, %aten::batch_norm_48.running_mean: Tensor[(512), float32] /* 
span=aten::batch_norm_48.running_mean:0:0 */, %aten::batch_norm_48.running_var: Tensor[(512), float32] /* span=aten::batch_norm_48.running_var:0:0 */, %aten::_convolution_49.weight: Tensor[(2048, 512, 1, 1), float32] /* span=aten::_convolution_49.weight:0:0 */, %aten::batch_norm_49.weight: Tensor[(2048), float32] /* span=aten::batch_norm_49.weight:0:0 */, %aten::batch_norm_49.bias: Tensor[(2048), float32] /* span=aten::batch_norm_49.bias:0:0 */, %aten::batch_norm_49.running_mean: Tensor[(2048), float32] /* span=aten::batch_norm_49.running_mean:0:0 */, %aten::batch_norm_49.running_var: Tensor[(2048), float32] /* span=aten::batch_norm_49.running_var:0:0 */, %aten::_convolution_50.weight: Tensor[(512, 2048, 1, 1), float32] /* span=aten::_convolution_50.weight:0:0 */, %aten::batch_norm_50.weight: Tensor[(512), float32] /* span=aten::batch_norm_50.weight:0:0 */, %aten::batch_norm_50.bias: Tensor[(512), float32] /* span=aten::batch_norm_50.bias:0:0 */, %aten::batch_norm_50.running_mean: Tensor[(512), float32] /* span=aten::batch_norm_50.running_mean:0:0 */, %aten::batch_norm_50.running_var: Tensor[(512), float32] /* span=aten::batch_norm_50.running_var:0:0 */, %aten::_convolution_51.weight: Tensor[(512, 512, 3, 3), float32] /* span=aten::_convolution_51.weight:0:0 */, %aten::batch_norm_51.weight: Tensor[(512), float32] /* span=aten::batch_norm_51.weight:0:0 */, %aten::batch_norm_51.bias: Tensor[(512), float32] /* span=aten::batch_norm_51.bias:0:0 */, %aten::batch_norm_51.running_mean: Tensor[(512), float32] /* span=aten::batch_norm_51.running_mean:0:0 */, %aten::batch_norm_51.running_var: Tensor[(512), float32] /* span=aten::batch_norm_51.running_var:0:0 */, %aten::_convolution_52.weight: Tensor[(2048, 512, 1, 1), float32] /* span=aten::_convolution_52.weight:0:0 */, %aten::batch_norm_52.weight: Tensor[(2048), float32] /* span=aten::batch_norm_52.weight:0:0 */, %aten::batch_norm_52.bias: Tensor[(2048), float32] /* span=aten::batch_norm_52.bias:0:0 */, %aten::batch_norm_52.running_mean: Tensor[(2048), float32] /* span=aten::batch_norm_52.running_mean:0:0 */, %aten::batch_norm_52.running_var: Tensor[(2048), float32] /* span=aten::batch_norm_52.running_var:0:0 */, %aten::linear_0.weight: Tensor[(1000, 2048), float32] /* span=aten::linear_0.weight:0:0 */, %aten::linear_0.bias: Tensor[(1000), float32] /* span=aten::linear_0.bias:0:0 */) {
%0 = nn.conv2d(%input, %aten::_convolution_0.weight, strides=[2, 2], padding=[3, 3, 3, 3], channels=64, kernel_size=[7, 7]) /* span=aten::_convolution_0:0:0 */;
%1 = nn.batch_norm(%0, %aten::batch_norm_0.weight, %aten::batch_norm_0.bias, %aten::batch_norm_0.running_mean, %aten::batch_norm_0.running_var) /* span=aten::batch_norm_0:0:0 */;
%2 = %1.0 /* span=aten::batch_norm_0:0:0 */;
%3 = nn.relu(%2) /* span=aten::relu__0:0:0 */;
%4 = nn.max_pool2d(%3, pool_size=[3, 3], strides=[2, 2], padding=[1, 1, 1, 1]) /* span=aten::max_pool2d_0:0:0 */;
%5 = nn.conv2d(%4, %aten::_convolution_1.weight, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* span=aten::_convolution_1:0:0 */;
%6 = nn.batch_norm(%5, %aten::batch_norm_1.weight, %aten::batch_norm_1.bias, %aten::batch_norm_1.running_mean, %aten::batch_norm_1.running_var) /* span=aten::batch_norm_1:0:0 */;
%7 = %6.0 /* span=aten::batch_norm_1:0:0 */;
%8 = nn.relu(%7) /* span=aten::relu__1:0:0 */;
%9 = nn.conv2d(%8, %aten::_convolution_2.weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* span=aten::_convolution_2:0:0 */;
%10 = nn.batch_norm(%9, %aten::batch_norm_2.weight, %aten::batch_norm_2.bias, %aten::batch_norm_2.running_mean, %aten::batch_norm_2.running_var) /* span=aten::batch_norm_2:0:0 */;
%11 = %10.0 /* span=aten::batch_norm_2:0:0 */;
%12 = nn.relu(%11) /* span=aten::relu__2:0:0 */;
%13 = nn.conv2d(%12, %aten::_convolution_3.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* span=aten::_convolution_3:0:0 */;
%14 = nn.batch_norm(%13, %aten::batch_norm_3.weight, %aten::batch_norm_3.bias, %aten::batch_norm_3.running_mean, %aten::batch_norm_3.running_var) /* span=aten::batch_norm_3:0:0 */;
%15 = nn.conv2d(%4, %aten::_convolution_4.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* span=aten::_convolution_4:0:0 */;
%16 = nn.batch_norm(%15, %aten::batch_norm_4.weight, %aten::batch_norm_4.bias, %aten::batch_norm_4.running_mean, %aten::batch_norm_4.running_var) /* span=aten::batch_norm_4:0:0 */;
%17 = %14.0 /* span=aten::batch_norm_3:0:0 */;
%18 = %16.0 /* span=aten::batch_norm_4:0:0 */;
%19 = add(%17, %18) /* span=aten::add__0:0:0 */;
%20 = nn.relu(%19) /* span=aten::relu__3:0:0 */;
%21 = nn.conv2d(%20, %aten::_convolution_5.weight, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* span=aten::_convolution_5:0:0 */;
%22 = nn.batch_norm(%21, %aten::batch_norm_5.weight, %aten::batch_norm_5.bias, %aten::batch_norm_5.running_mean, %aten::batch_norm_5.running_var) /* span=aten::batch_norm_5:0:0 */;
%23 = %22.0 /* span=aten::batch_norm_5:0:0 */;
%24 = nn.relu(%23) /* span=aten::relu__4:0:0 */;
%25 = nn.conv2d(%24, %aten::_convolution_6.weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* span=aten::_convolution_6:0:0 */;
%26 = nn.batch_norm(%25, %aten::batch_norm_6.weight, %aten::batch_norm_6.bias, %aten::batch_norm_6.running_mean, %aten::batch_norm_6.running_var) /* span=aten::batch_norm_6:0:0 */;
%27 = %26.0 /* span=aten::batch_norm_6:0:0 */;
%28 = nn.relu(%27) /* span=aten::relu__5:0:0 */;
%29 = nn.conv2d(%28, %aten::_convolution_7.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* span=aten::_convolution_7:0:0 */;
%30 = nn.batch_norm(%29, %aten::batch_norm_7.weight, %aten::batch_norm_7.bias, %aten::batch_norm_7.running_mean, %aten::batch_norm_7.running_var) /* span=aten::batch_norm_7:0:0 */;
%31 = %30.0 /* span=aten::batch_norm_7:0:0 */;
%32 = add(%31, %20) /* span=aten::add__1:0:0 */;
%33 = nn.relu(%32) /* span=aten::relu__6:0:0 */;
%34 = nn.conv2d(%33, %aten::_convolution_8.weight, padding=[0, 0, 0, 0], channels=64, kernel_size=[1, 1]) /* span=aten::_convolution_8:0:0 */;
%35 = nn.batch_norm(%34, %aten::batch_norm_8.weight, %aten::batch_norm_8.bias, %aten::batch_norm_8.running_mean, %aten::batch_norm_8.running_var) /* span=aten::batch_norm_8:0:0 */;
%36 = %35.0 /* span=aten::batch_norm_8:0:0 */;
%37 = nn.relu(%36) /* span=aten::relu__7:0:0 */;
%38 = nn.conv2d(%37, %aten::_convolution_9.weight, padding=[1, 1, 1, 1], channels=64, kernel_size=[3, 3]) /* span=aten::_convolution_9:0:0 */;
%39 = nn.batch_norm(%38, %aten::batch_norm_9.weight, %aten::batch_norm_9.bias, %aten::batch_norm_9.running_mean, %aten::batch_norm_9.running_var) /* span=aten::batch_norm_9:0:0 */;
%40 = %39.0 /* span=aten::batch_norm_9:0:0 */;
%41 = nn.relu(%40) /* span=aten::relu__8:0:0 */;
%42 = nn.conv2d(%41, %aten::_convolution_10.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* span=aten::_convolution_10:0:0 */;
%43 = nn.batch_norm(%42, %aten::batch_norm_10.weight, %aten::batch_norm_10.bias, %aten::batch_norm_10.running_mean, %aten::batch_norm_10.running_var) /* span=aten::batch_norm_10:0:0 */;
%44 = %43.0 /* span=aten::batch_norm_10:0:0 */;
%45 = add(%44, %33) /* span=aten::add__2:0:0 */;
%46 = nn.relu(%45) /* span=aten::relu__9:0:0 */;
%47 = nn.conv2d(%46, %aten::_convolution_11.weight, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* span=aten::_convolution_11:0:0 */;
%48 = nn.batch_norm(%47, %aten::batch_norm_11.weight, %aten::batch_norm_11.bias, %aten::batch_norm_11.running_mean, %aten::batch_norm_11.running_var) /* span=aten::batch_norm_11:0:0 */;
%49 = %48.0 /* span=aten::batch_norm_11:0:0 */;
%50 = nn.relu(%49) /* span=aten::relu__10:0:0 */;
%51 = nn.conv2d(%50, %aten::_convolution_12.weight, strides=[2, 2], padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* span=aten::_convolution_12:0:0 */;
%52 = nn.batch_norm(%51, %aten::batch_norm_12.weight, %aten::batch_norm_12.bias, %aten::batch_norm_12.running_mean, %aten::batch_norm_12.running_var) /* span=aten::batch_norm_12:0:0 */;
%53 = %52.0 /* span=aten::batch_norm_12:0:0 */;
%54 = nn.relu(%53) /* span=aten::relu__11:0:0 */;
%55 = nn.conv2d(%54, %aten::_convolution_13.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* span=aten::_convolution_13:0:0 */;
%56 = nn.batch_norm(%55, %aten::batch_norm_13.weight, %aten::batch_norm_13.bias, %aten::batch_norm_13.running_mean, %aten::batch_norm_13.running_var) /* span=aten::batch_norm_13:0:0 */;
%57 = nn.conv2d(%46, %aten::_convolution_14.weight, strides=[2, 2], padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* span=aten::_convolution_14:0:0 */;
%58 = nn.batch_norm(%57, %aten::batch_norm_14.weight, %aten::batch_norm_14.bias, %aten::batch_norm_14.running_mean, %aten::batch_norm_14.running_var) /* span=aten::batch_norm_14:0:0 */;
%59 = %56.0 /* span=aten::batch_norm_13:0:0 */;
%60 = %58.0 /* span=aten::batch_norm_14:0:0 */;
%61 = add(%59, %60) /* span=aten::add__3:0:0 */;
%62 = nn.relu(%61) /* span=aten::relu__12:0:0 */;
%63 = nn.conv2d(%62, %aten::_convolution_15.weight, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* span=aten::_convolution_15:0:0 */;
%64 = nn.batch_norm(%63, %aten::batch_norm_15.weight, %aten::batch_norm_15.bias, %aten::batch_norm_15.running_mean, %aten::batch_norm_15.running_var) /* span=aten::batch_norm_15:0:0 */;
%65 = %64.0 /* span=aten::batch_norm_15:0:0 */;
%66 = nn.relu(%65) /* span=aten::relu__13:0:0 */;
%67 = nn.conv2d(%66, %aten::_convolution_16.weight, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* span=aten::_convolution_16:0:0 */;
%68 = nn.batch_norm(%67, %aten::batch_norm_16.weight, %aten::batch_norm_16.bias, %aten::batch_norm_16.running_mean, %aten::batch_norm_16.running_var) /* span=aten::batch_norm_16:0:0 */;
%69 = %68.0 /* span=aten::batch_norm_16:0:0 */;
%70 = nn.relu(%69) /* span=aten::relu__14:0:0 */;
%71 = nn.conv2d(%70, %aten::_convolution_17.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* span=aten::_convolution_17:0:0 */;
%72 = nn.batch_norm(%71, %aten::batch_norm_17.weight, %aten::batch_norm_17.bias, %aten::batch_norm_17.running_mean, %aten::batch_norm_17.running_var) /* span=aten::batch_norm_17:0:0 */;
%73 = %72.0 /* span=aten::batch_norm_17:0:0 */;
%74 = add(%73, %62) /* span=aten::add__4:0:0 */;
%75 = nn.relu(%74) /* span=aten::relu__15:0:0 */;
%76 = nn.conv2d(%75, %aten::_convolution_18.weight, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* span=aten::_convolution_18:0:0 */;
%77 = nn.batch_norm(%76, %aten::batch_norm_18.weight, %aten::batch_norm_18.bias, %aten::batch_norm_18.running_mean, %aten::batch_norm_18.running_var) /* span=aten::batch_norm_18:0:0 */;
%78 = %77.0 /* span=aten::batch_norm_18:0:0 */;
%79 = nn.relu(%78) /* span=aten::relu__16:0:0 */;
%80 = nn.conv2d(%79, %aten::_convolution_19.weight, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* span=aten::_convolution_19:0:0 */;
%81 = nn.batch_norm(%80, %aten::batch_norm_19.weight, %aten::batch_norm_19.bias, %aten::batch_norm_19.running_mean, %aten::batch_norm_19.running_var) /* span=aten::batch_norm_19:0:0 */;
%82 = %81.0 /* span=aten::batch_norm_19:0:0 */;
%83 = nn.relu(%82) /* span=aten::relu__17:0:0 */;
%84 = nn.conv2d(%83, %aten::_convolution_20.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* span=aten::_convolution_20:0:0 */;
%85 = nn.batch_norm(%84, %aten::batch_norm_20.weight, %aten::batch_norm_20.bias, %aten::batch_norm_20.running_mean, %aten::batch_norm_20.running_var) /* span=aten::batch_norm_20:0:0 */;
%86 = %85.0 /* span=aten::batch_norm_20:0:0 */;
%87 = add(%86, %75) /* span=aten::add__5:0:0 */;
%88 = nn.relu(%87) /* span=aten::relu__18:0:0 */;
%89 = nn.conv2d(%88, %aten::_convolution_21.weight, padding=[0, 0, 0, 0], channels=128, kernel_size=[1, 1]) /* span=aten::_convolution_21:0:0 */;
%90 = nn.batch_norm(%89, %aten::batch_norm_21.weight, %aten::batch_norm_21.bias, %aten::batch_norm_21.running_mean, %aten::batch_norm_21.running_var) /* span=aten::batch_norm_21:0:0 */;
%91 = %90.0 /* span=aten::batch_norm_21:0:0 */;
%92 = nn.relu(%91) /* span=aten::relu__19:0:0 */;
%93 = nn.conv2d(%92, %aten::_convolution_22.weight, padding=[1, 1, 1, 1], channels=128, kernel_size=[3, 3]) /* span=aten::_convolution_22:0:0 */;
%94 = nn.batch_norm(%93, %aten::batch_norm_22.weight, %aten::batch_norm_22.bias, %aten::batch_norm_22.running_mean, %aten::batch_norm_22.running_var) /* span=aten::batch_norm_22:0:0 */;
%95 = %94.0 /* span=aten::batch_norm_22:0:0 */;
%96 = nn.relu(%95) /* span=aten::relu__20:0:0 */;
%97 = nn.conv2d(%96, %aten::_convolution_23.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* span=aten::_convolution_23:0:0 */;
%98 = nn.batch_norm(%97, %aten::batch_norm_23.weight, %aten::batch_norm_23.bias, %aten::batch_norm_23.running_mean, %aten::batch_norm_23.running_var) /* span=aten::batch_norm_23:0:0 */;
%99 = %98.0 /* span=aten::batch_norm_23:0:0 */;
%100 = add(%99, %88) /* span=aten::add__6:0:0 */;
%101 = nn.relu(%100) /* span=aten::relu__21:0:0 */;
%102 = nn.conv2d(%101, %aten::_convolution_24.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* span=aten::_convolution_24:0:0 */;
%103 = nn.batch_norm(%102, %aten::batch_norm_24.weight, %aten::batch_norm_24.bias, %aten::batch_norm_24.running_mean, %aten::batch_norm_24.running_var) /* span=aten::batch_norm_24:0:0 */;
%104 = %103.0 /* span=aten::batch_norm_24:0:0 */;
%105 = nn.relu(%104) /* span=aten::relu__22:0:0 */;
%106 = nn.conv2d(%105, %aten::_convolution_25.weight, strides=[2, 2], padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* span=aten::_convolution_25:0:0 */;
%107 = nn.batch_norm(%106, %aten::batch_norm_25.weight, %aten::batch_norm_25.bias, %aten::batch_norm_25.running_mean, %aten::batch_norm_25.running_var) /* span=aten::batch_norm_25:0:0 */;
%108 = %107.0 /* span=aten::batch_norm_25:0:0 */;
%109 = nn.relu(%108) /* span=aten::relu__23:0:0 */;
%110 = nn.conv2d(%109, %aten::_convolution_26.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* span=aten::_convolution_26:0:0 */;
%111 = nn.batch_norm(%110, %aten::batch_norm_26.weight, %aten::batch_norm_26.bias, %aten::batch_norm_26.running_mean, %aten::batch_norm_26.running_var) /* span=aten::batch_norm_26:0:0 */;
%112 = nn.conv2d(%101, %aten::_convolution_27.weight, strides=[2, 2], padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* span=aten::_convolution_27:0:0 */;
%113 = nn.batch_norm(%112, %aten::batch_norm_27.weight, %aten::batch_norm_27.bias, %aten::batch_norm_27.running_mean, %aten::batch_norm_27.running_var) /* span=aten::batch_norm_27:0:0 */;
%114 = %111.0 /* span=aten::batch_norm_26:0:0 */;
%115 = %113.0 /* span=aten::batch_norm_27:0:0 */;
%116 = add(%114, %115) /* span=aten::add__7:0:0 */;
%117 = nn.relu(%116) /* span=aten::relu__24:0:0 */;
%118 = nn.conv2d(%117, %aten::_convolution_28.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* span=aten::_convolution_28:0:0 */;
%119 = nn.batch_norm(%118, %aten::batch_norm_28.weight, %aten::batch_norm_28.bias, %aten::batch_norm_28.running_mean, %aten::batch_norm_28.running_var) /* span=aten::batch_norm_28:0:0 */;
%120 = %119.0 /* span=aten::batch_norm_28:0:0 */;
%121 = nn.relu(%120) /* span=aten::relu__25:0:0 */;
%122 = nn.conv2d(%121, %aten::_convolution_29.weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* span=aten::_convolution_29:0:0 */;
%123 = nn.batch_norm(%122, %aten::batch_norm_29.weight, %aten::batch_norm_29.bias, %aten::batch_norm_29.running_mean, %aten::batch_norm_29.running_var) /* span=aten::batch_norm_29:0:0 */;
%124 = %123.0 /* span=aten::batch_norm_29:0:0 */;
%125 = nn.relu(%124) /* span=aten::relu__26:0:0 */;
%126 = nn.conv2d(%125, %aten::_convolution_30.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* span=aten::_convolution_30:0:0 */;
%127 = nn.batch_norm(%126, %aten::batch_norm_30.weight, %aten::batch_norm_30.bias, %aten::batch_norm_30.running_mean, %aten::batch_norm_30.running_var) /* span=aten::batch_norm_30:0:0 */;
%128 = %127.0 /* span=aten::batch_norm_30:0:0 */;
%129 = add(%128, %117) /* span=aten::add__8:0:0 */;
%130 = nn.relu(%129) /* span=aten::relu__27:0:0 */;
%131 = nn.conv2d(%130, %aten::_convolution_31.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* span=aten::_convolution_31:0:0 */;
%132 = nn.batch_norm(%131, %aten::batch_norm_31.weight, %aten::batch_norm_31.bias, %aten::batch_norm_31.running_mean, %aten::batch_norm_31.running_var) /* span=aten::batch_norm_31:0:0 */;
%133 = %132.0 /* span=aten::batch_norm_31:0:0 */;
%134 = nn.relu(%133) /* span=aten::relu__28:0:0 */;
%135 = nn.conv2d(%134, %aten::_convolution_32.weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* span=aten::_convolution_32:0:0 */;
%136 = nn.batch_norm(%135, %aten::batch_norm_32.weight, %aten::batch_norm_32.bias, %aten::batch_norm_32.running_mean, %aten::batch_norm_32.running_var) /* span=aten::batch_norm_32:0:0 */;
%137 = %136.0 /* span=aten::batch_norm_32:0:0 */;
%138 = nn.relu(%137) /* span=aten::relu__29:0:0 */;
%139 = nn.conv2d(%138, %aten::_convolution_33.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* span=aten::_convolution_33:0:0 */;
%140 = nn.batch_norm(%139, %aten::batch_norm_33.weight, %aten::batch_norm_33.bias, %aten::batch_norm_33.running_mean, %aten::batch_norm_33.running_var) /* span=aten::batch_norm_33:0:0 */;
%141 = %140.0 /* span=aten::batch_norm_33:0:0 */;
%142 = add(%141, %130) /* span=aten::add__9:0:0 */;
%143 = nn.relu(%142) /* span=aten::relu__30:0:0 */;
%144 = nn.conv2d(%143, %aten::_convolution_34.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* span=aten::_convolution_34:0:0 */;
%145 = nn.batch_norm(%144, %aten::batch_norm_34.weight, %aten::batch_norm_34.bias, %aten::batch_norm_34.running_mean, %aten::batch_norm_34.running_var) /* span=aten::batch_norm_34:0:0 */;
%146 = %145.0 /* span=aten::batch_norm_34:0:0 */;
%147 = nn.relu(%146) /* span=aten::relu__31:0:0 */;
%148 = nn.conv2d(%147, %aten::_convolution_35.weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* span=aten::_convolution_35:0:0 */;
%149 = nn.batch_norm(%148, %aten::batch_norm_35.weight, %aten::batch_norm_35.bias, %aten::batch_norm_35.running_mean, %aten::batch_norm_35.running_var) /* span=aten::batch_norm_35:0:0 */;
%150 = %149.0 /* span=aten::batch_norm_35:0:0 */;
%151 = nn.relu(%150) /* span=aten::relu__32:0:0 */;
%152 = nn.conv2d(%151, %aten::_convolution_36.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* span=aten::_convolution_36:0:0 */;
%153 = nn.batch_norm(%152, %aten::batch_norm_36.weight, %aten::batch_norm_36.bias, %aten::batch_norm_36.running_mean, %aten::batch_norm_36.running_var) /* span=aten::batch_norm_36:0:0 */;
%154 = %153.0 /* span=aten::batch_norm_36:0:0 */;
%155 = add(%154, %143) /* span=aten::add__10:0:0 */;
%156 = nn.relu(%155) /* span=aten::relu__33:0:0 */;
%157 = nn.conv2d(%156, %aten::_convolution_37.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* span=aten::_convolution_37:0:0 */;
%158 = nn.batch_norm(%157, %aten::batch_norm_37.weight, %aten::batch_norm_37.bias, %aten::batch_norm_37.running_mean, %aten::batch_norm_37.running_var) /* span=aten::batch_norm_37:0:0 */;
%159 = %158.0 /* span=aten::batch_norm_37:0:0 */;
%160 = nn.relu(%159) /* span=aten::relu__34:0:0 */;
%161 = nn.conv2d(%160, %aten::_convolution_38.weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* span=aten::_convolution_38:0:0 */;
%162 = nn.batch_norm(%161, %aten::batch_norm_38.weight, %aten::batch_norm_38.bias, %aten::batch_norm_38.running_mean, %aten::batch_norm_38.running_var) /* span=aten::batch_norm_38:0:0 */;
%163 = %162.0 /* span=aten::batch_norm_38:0:0 */;
%164 = nn.relu(%163) /* span=aten::relu__35:0:0 */;
%165 = nn.conv2d(%164, %aten::_convolution_39.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* span=aten::_convolution_39:0:0 */;
%166 = nn.batch_norm(%165, %aten::batch_norm_39.weight, %aten::batch_norm_39.bias, %aten::batch_norm_39.running_mean, %aten::batch_norm_39.running_var) /* span=aten::batch_norm_39:0:0 */;
%167 = %166.0 /* span=aten::batch_norm_39:0:0 */;
%168 = add(%167, %156) /* span=aten::add__11:0:0 */;
%169 = nn.relu(%168) /* span=aten::relu__36:0:0 */;
%170 = nn.conv2d(%169, %aten::_convolution_40.weight, padding=[0, 0, 0, 0], channels=256, kernel_size=[1, 1]) /* span=aten::_convolution_40:0:0 */;
%171 = nn.batch_norm(%170, %aten::batch_norm_40.weight, %aten::batch_norm_40.bias, %aten::batch_norm_40.running_mean, %aten::batch_norm_40.running_var) /* span=aten::batch_norm_40:0:0 */;
%172 = %171.0 /* span=aten::batch_norm_40:0:0 */;
%173 = nn.relu(%172) /* span=aten::relu__37:0:0 */;
%174 = nn.conv2d(%173, %aten::_convolution_41.weight, padding=[1, 1, 1, 1], channels=256, kernel_size=[3, 3]) /* span=aten::_convolution_41:0:0 */;
%175 = nn.batch_norm(%174, %aten::batch_norm_41.weight, %aten::batch_norm_41.bias, %aten::batch_norm_41.running_mean, %aten::batch_norm_41.running_var) /* span=aten::batch_norm_41:0:0 */;
%176 = %175.0 /* span=aten::batch_norm_41:0:0 */;
%177 = nn.relu(%176) /* span=aten::relu__38:0:0 */;
%178 = nn.conv2d(%177, %aten::_convolution_42.weight, padding=[0, 0, 0, 0], channels=1024, kernel_size=[1, 1]) /* span=aten::_convolution_42:0:0 */;
%179 = nn.batch_norm(%178, %aten::batch_norm_42.weight, %aten::batch_norm_42.bias, %aten::batch_norm_42.running_mean, %aten::batch_norm_42.running_var) /* span=aten::batch_norm_42:0:0 */;
%180 = %179.0 /* span=aten::batch_norm_42:0:0 */;
%181 = add(%180, %169) /* span=aten::add__12:0:0 */;
%182 = nn.relu(%181) /* span=aten::relu__39:0:0 */;
%183 = nn.conv2d(%182, %aten::_convolution_43.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* span=aten::_convolution_43:0:0 */;
%184 = nn.batch_norm(%183, %aten::batch_norm_43.weight, %aten::batch_norm_43.bias, %aten::batch_norm_43.running_mean, %aten::batch_norm_43.running_var) /* span=aten::batch_norm_43:0:0 */;
%185 = %184.0 /* span=aten::batch_norm_43:0:0 */;
%186 = nn.relu(%185) /* span=aten::relu__40:0:0 */;
%187 = nn.conv2d(%186, %aten::_convolution_44.weight, strides=[2, 2], padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* span=aten::_convolution_44:0:0 */;
%188 = nn.batch_norm(%187, %aten::batch_norm_44.weight, %aten::batch_norm_44.bias, %aten::batch_norm_44.running_mean, %aten::batch_norm_44.running_var) /* span=aten::batch_norm_44:0:0 */;
%189 = %188.0 /* span=aten::batch_norm_44:0:0 */;
%190 = nn.relu(%189) /* span=aten::relu__41:0:0 */;
%191 = nn.conv2d(%190, %aten::_convolution_45.weight, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]) /* span=aten::_convolution_45:0:0 */;
%192 = nn.batch_norm(%191, %aten::batch_norm_45.weight, %aten::batch_norm_45.bias, %aten::batch_norm_45.running_mean, %aten::batch_norm_45.running_var) /* span=aten::batch_norm_45:0:0 */;
%193 = nn.conv2d(%182, %aten::_convolution_46.weight, strides=[2, 2], padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]) /* span=aten::_convolution_46:0:0 */;
%194 = nn.batch_norm(%193, %aten::batch_norm_46.weight, %aten::batch_norm_46.bias, %aten::batch_norm_46.running_mean, %aten::batch_norm_46.running_var) /* span=aten::batch_norm_46:0:0 */;
%195 = %192.0 /* span=aten::batch_norm_45:0:0 */;
%196 = %194.0 /* span=aten::batch_norm_46:0:0 */;
%197 = add(%195, %196) /* span=aten::add__13:0:0 */;
%198 = nn.relu(%197) /* span=aten::relu__42:0:0 */;
%199 = nn.conv2d(%198, %aten::_convolution_47.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* span=aten::_convolution_47:0:0 */;
%200 = nn.batch_norm(%199, %aten::batch_norm_47.weight, %aten::batch_norm_47.bias, %aten::batch_norm_47.running_mean, %aten::batch_norm_47.running_var) /* span=aten::batch_norm_47:0:0 */;
%201 = %200.0 /* span=aten::batch_norm_47:0:0 */;
%202 = nn.relu(%201) /* span=aten::relu__43:0:0 */;
%203 = nn.conv2d(%202, %aten::_convolution_48.weight, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* span=aten::_convolution_48:0:0 */;
%204 = nn.batch_norm(%203, %aten::batch_norm_48.weight, %aten::batch_norm_48.bias, %aten::batch_norm_48.running_mean, %aten::batch_norm_48.running_var) /* span=aten::batch_norm_48:0:0 */;
%205 = %204.0 /* span=aten::batch_norm_48:0:0 */;
%206 = nn.relu(%205) /* span=aten::relu__44:0:0 */;
%207 = nn.conv2d(%206, %aten::_convolution_49.weight, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]) /* span=aten::_convolution_49:0:0 */;
%208 = nn.batch_norm(%207, %aten::batch_norm_49.weight, %aten::batch_norm_49.bias, %aten::batch_norm_49.running_mean, %aten::batch_norm_49.running_var) /* span=aten::batch_norm_49:0:0 */;
%209 = %208.0 /* span=aten::batch_norm_49:0:0 */;
%210 = add(%209, %198) /* span=aten::add__14:0:0 */;
%211 = nn.relu(%210) /* span=aten::relu__45:0:0 */;
%212 = nn.conv2d(%211, %aten::_convolution_50.weight, padding=[0, 0, 0, 0], channels=512, kernel_size=[1, 1]) /* span=aten::_convolution_50:0:0 */;
%213 = nn.batch_norm(%212, %aten::batch_norm_50.weight, %aten::batch_norm_50.bias, %aten::batch_norm_50.running_mean, %aten::batch_norm_50.running_var) /* span=aten::batch_norm_50:0:0 */;
%214 = %213.0 /* span=aten::batch_norm_50:0:0 */;
%215 = nn.relu(%214) /* span=aten::relu__46:0:0 */;
%216 = nn.conv2d(%215, %aten::_convolution_51.weight, padding=[1, 1, 1, 1], channels=512, kernel_size=[3, 3]) /* span=aten::_convolution_51:0:0 */;
%217 = nn.batch_norm(%216, %aten::batch_norm_51.weight, %aten::batch_norm_51.bias, %aten::batch_norm_51.running_mean, %aten::batch_norm_51.running_var) /* span=aten::batch_norm_51:0:0 */;
%218 = %217.0 /* span=aten::batch_norm_51:0:0 */;
%219 = nn.relu(%218) /* span=aten::relu__47:0:0 */;
%220 = nn.conv2d(%219, %aten::_convolution_52.weight, padding=[0, 0, 0, 0], channels=2048, kernel_size=[1, 1]) /* span=aten::_convolution_52:0:0 */;
%221 = nn.batch_norm(%220, %aten::batch_norm_52.weight, %aten::batch_norm_52.bias, %aten::batch_norm_52.running_mean, %aten::batch_norm_52.running_var) /* span=aten::batch_norm_52:0:0 */;
%222 = %221.0 /* span=aten::batch_norm_52:0:0 */;
%223 = add(%222, %211) /* span=aten::add__15:0:0 */;
%224 = nn.relu(%223) /* span=aten::relu__48:0:0 */;
%225 = nn.adaptive_avg_pool2d(%224, output_size=[1, 1]) /* span=aten::adaptive_avg_pool2d_0:0:0 */;
%226 = reshape(%225, newshape=[0, -1, 1, 1]) /* span=aten::flatten_0:0:0 */;
%227 = squeeze(%226, axis=[2, 3]) /* span=aten::flatten_0:0:0 */;
%228 = nn.dense(%227, %aten::linear_0.weight, units=None) /* span=aten::linear_0:0:0 */;
nn.bias_add(%228, %aten::linear_0.bias, axis=-1) /* span=aten::linear_0:0:0 */
}
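Besides the relay module printed above, tvmc_model.params holds the weights. A small sketch to get a feel for the model size, assuming params is the usual dict mapping parameter names to arrays:
num_weights = sum(np.prod(p.shape) for p in tvmc_model.params.values())
print("Parameter tensors:", len(tvmc_model.params))
print("Total weights:", int(num_weights))  # roughly 25.6 million for ResNet50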
AutoTuning¶
Now, the API below can be used to auto tune the model for any target. Tuning requires an RPC setup; please refer to Deploy to Adreno™ GPU.
rpc_tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
rpc_tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
rpc_key = "android"
rpc_tracker = rpc_tracker_host + ":" + str(rpc_tracker_port)
# Auto tuning is a compute-intensive and time-consuming task.
# It is set to False in the above configuration as this script runs on x86 for demonstration.
# Please set :code:`is_tuning` to True to enable auto tuning.
# Also, :code:`target` is set to :code:`llvm` in this example to keep it compatible with the x86 demonstration.
# Please change it to :code:`opencl` or :code:`opencl -device=adreno` for an RPC target in the configuration above.
if is_tuning:
    tvmc.tune(
        tvmc_model,
        target=target,
        tuning_records=tune_log,
        target_host=target_host,
        hostname=rpc_tracker_host,
        port=rpc_tracker_port,
        rpc_key=rpc_key,
        tuner="xgb",
        repeat=30,
        trials=3,
        early_stopping=0,
    )
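When tuning over RPC it is worth confirming that the device is visible to the tracker first. A hedged sketch using the tracker settings above (assumes a tracker is running and a device registered with the 'android' key):
from tvm import rpc
if is_tuning:
    tracker = rpc.connect_tracker(rpc_tracker_host, rpc_tracker_port)
    # Prints the tracker queue; the 'android' key should appear here.
    print(tracker.text_summary())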
Compilation¶
Compile the model to produce TVM artifacts.
# This generated example runs on our x86 server for demonstration.
# To deploy and run on a real target over RPC, please set :code:`local_demo` to False in the above configuration section.
# OpenCLML offloading will try to accelerate supported operators by using the OpenCLML proprietary operator library.
# By default :code:`enable_clml` is set to False in the above configuration section.
if not enable_clml:
    if local_demo:
        tvmc_package = tvmc.compile(
            tvmc_model,
            target=target,
        )
    else:
        tvmc_package = tvmc.compile(
            tvmc_model,
            target=target,
            target_host=target_host,
            cross=cross_compiler,
            tuning_records=tune_log,
        )
else:
    # Alternatively, we can save the compilation output as a TVMCPackage on disk.
    # This way the compiled module can be loaded later without compiling again.
    target = target + ", clml"
    tmp_path = utils.tempdir()  # temporary directory to hold the package
    pkg_path = tmp_path.relpath("torch-resnet50.tar")
    tvmc.compile(
        tvmc_model,
        target=target,
        target_host=target_host,
        cross=cross_compiler,
        tuning_records=tune_log,
        package_path=pkg_path,
    )
    # Load the compiled package
    tvmc_package = TVMCPackage(package_path=pkg_path)
# tvmc_package consists of tvmc_package.lib_path, tvmc_package.graph and tvmc_package.params.
# The saved TVMCPackage is simply a tar archive containing mod.so, mod.json and mod.params.
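To verify what actually got saved, the package archive can be listed with the standard library; a small sketch that only applies when the CLML branch above wrote pkg_path:
import tarfile
if enable_clml:
    with tarfile.open(pkg_path) as tar:
        # Expect mod.so, mod.json and mod.params as described above.
        print(tar.getnames())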
Deploy & Run¶
Deploy and run the compiled model over RPC. Let tvmc fill the inputs with random values.
# Run locally or on the RPC setup
if local_demo:
    result = tvmc.run(tvmc_package, device="cpu", fill_mode="random")
else:
    result = tvmc.run(
        tvmc_package,
        device="cl",
        rpc_key=rpc_key,
        hostname=rpc_tracker_host,
        port=rpc_tracker_port,
        fill_mode="random",
    )
# result holds the outputs of the run.
print("Result:", result)
Result: []
Output Names:
['output_0']
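The outputs can also be read back for post-processing; a sketch assuming result.outputs maps the names printed above to NumPy arrays (the exact attribute layout may differ across TVM versions):
logits = result.outputs["output_0"]
top5 = np.argsort(logits[0])[-5:][::-1]
print("Top-5 ImageNet class ids:", top5)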
Total running time of the script: ( 1 minutes 6.369 seconds)