#ifndef TVM_TOPI_NN_POOLING_H_
#define TVM_TOPI_NN_POOLING_H_
/*! \brief Pooling type */
enum PoolType : int {
  kAvgPool,
  kMaxPool,
};

inline Tensor pool_grad_impl(const Tensor& out_grad, const Tensor& x,
                             const Array<PrimExpr>& kernel_size, const Array<PrimExpr>& stride_size,
                             const Array<PrimExpr>& padding_size, PoolType pool_type,
                             bool ceil_mode, const size_t height_axis, const size_t width_axis,
                             bool count_include_pad) {
  ICHECK(out_grad->shape.size() >= 2) << "Pooling grad output must be >= 2-D (H, W)";
  ICHECK(x->shape.size() >= 2) << "Pooling input must be >= 2-D (H, W)";
  ICHECK_EQ(kernel_size.size(), 2) << "Pooling kernel_size must have 2 elements";
  ICHECK_EQ(stride_size.size(), 2) << "Pooling stride_size must have 2 elements";
  ICHECK_EQ(padding_size.size(), 4) << "Pooling padding_size must have 4 elements";
  auto kernel_height = kernel_size[0];
  auto kernel_width = kernel_size[1];
  auto stride_height = stride_size[0];
  auto stride_width = stride_size[1];

  auto height = x->shape[height_axis];
  auto width = x->shape[width_axis];

  auto pad_top = padding_size[0];
  auto pad_left = padding_size[1];
  auto pad_bottom = padding_size[2];
  auto pad_right = padding_size[3];

  if (ceil_mode) {
    // Additional padding to ensure we do ceil instead of floor when dividing by stride.
    pad_bottom += stride_height - 1;
    pad_right += stride_width - 1;
  }
  Array<PrimExpr> pad_before(std::vector<PrimExpr>(x->shape.size(), 0));
  pad_before.Set(height_axis, pad_top);
  pad_before.Set(width_axis, pad_left);

  Array<PrimExpr> pad_after(std::vector<PrimExpr>(x->shape.size(), 0));
  pad_after.Set(height_axis, pad_bottom);
  pad_after.Set(width_axis, pad_right);
  arith::Analyzer analyzer;
  auto out_height =
      analyzer.Simplify((height - kernel_height + pad_top + pad_bottom) / stride_height + 1);
  auto out_width =
      analyzer.Simplify((width - kernel_width + pad_left + pad_right) / stride_width + 1);

  Array<PrimExpr> out_shape = x->shape;
  out_shape.Set(height_axis, out_height);
  out_shape.Set(width_axis, out_width);
  const int64_t* padding_h0 = as_const_int(pad_top);
  const int64_t* padding_w0 = as_const_int(pad_left);
  const int64_t* padding_h1 = as_const_int(pad_bottom);
  const int64_t* padding_w1 = as_const_int(pad_right);
  const bool do_pad = ((padding_h0 && *padding_h0) || (padding_w0 && *padding_w0)) ||
                      ((padding_h1 && *padding_h1) || (padding_w1 && *padding_w1));

  if (pool_type == kMaxPool) {
    Array<PrimExpr> ravel_shape{x->shape.begin(), x->shape.end()};
    ravel_shape.Set(height_axis, ravel_shape[height_axis] + pad_top + pad_bottom);
    ravel_shape.Set(width_axis, ravel_shape[width_axis] + pad_left + pad_right);
    auto dheight = tvm::te::reduce_axis(Range(0, kernel_height), "dh");
    auto dwidth = tvm::te::reduce_axis(Range(0, kernel_width), "dw");
    auto argmax = MakeArgmaxReducer();
    auto pad_x = do_pad ? pad(x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x;

    // For each output element, record the flattened index of the window's maximum.
    auto mp_argmax = tvm::te::compute(
        out_shape,
        [&](const Array<Var>& inds) {
          Array<PrimExpr> window_inds{inds.begin(), inds.end()};
          window_inds.Set(height_axis, inds[height_axis] * stride_height + dheight);
          window_inds.Set(width_axis, inds[width_axis] * stride_width + dwidth);
          auto idx = detail::RavelIndex(window_inds, ravel_shape);
          return argmax({idx, pad_x(window_inds)}, {dheight, dwidth}, nullptr);
        },
        "maxpool_grad_argmax", kCommReduceIdx);
    auto mp_inds = mp_argmax[0];

    auto windowh =
        tvm::te::reduce_axis(Range(0, (kernel_height + stride_height - 1) / stride_height), "wh");
    auto windoww =
        tvm::te::reduce_axis(Range(0, (kernel_width + stride_width - 1) / stride_width), "ww");
    return tvm::te::compute(
        x->shape,
        [&](const Array<Var>& inds) {
          Array<PrimExpr> pad_inds{inds.begin(), inds.end()};
          pad_inds.Set(height_axis, pad_inds[height_axis] + pad_top);
          pad_inds.Set(width_axis, pad_inds[width_axis] + pad_left);
          auto idx = detail::RavelIndex(pad_inds, ravel_shape);
          // Output indices whose pooling windows may cover this input element.
          Array<PrimExpr> out_idx{inds.begin(), inds.end()};
          out_idx.Set(height_axis, (inds[height_axis] + pad_top) / stride_height - windowh);
          out_idx.Set(width_axis, (inds[width_axis] + pad_left) / stride_width - windoww);

          PrimExpr out_idx_lower_h = tir::Select(
              pad_inds[height_axis] < kernel_height, make_const(pad_inds[height_axis].dtype(), 0),
              (pad_inds[height_axis] - kernel_height) / stride_height + 1);
          PrimExpr out_idx_lower_w = tir::Select(
              pad_inds[width_axis] < kernel_width, make_const(pad_inds[width_axis].dtype(), 0),
              (pad_inds[width_axis] - kernel_width) / stride_width + 1);

          return tvm::sum(
              tvm::if_then_else(tir::And(tir::And(out_idx[height_axis] >= out_idx_lower_h,
                                                  out_idx[width_axis] >= out_idx_lower_w),
                                         mp_inds(out_idx) == idx),
                                out_grad(out_idx), make_const(x->dtype, 0)),
              {windowh, windoww});
        },
        "T_pool_grad", "pool_grad_max");
  } else if (pool_type == kAvgPool) {
    auto windowh =
        tvm::te::reduce_axis(Range(0, (kernel_height + stride_height - 1) / stride_height), "wh");
    auto windoww =
        tvm::te::reduce_axis(Range(0, (kernel_width + stride_width - 1) / stride_width), "ww");
    return tvm::te::compute(
        x->shape,
        [&](const Array<Var>& inds) {
          PrimExpr pad_h_idx = inds[height_axis] + pad_top;
          PrimExpr pad_w_idx = inds[width_axis] + pad_left;
          // Output indices whose pooling windows may cover this input element.
          Array<PrimExpr> out_idx{inds.begin(), inds.end()};
          out_idx.Set(height_axis, (pad_h_idx / stride_height - windowh));
          out_idx.Set(width_axis, (pad_w_idx / stride_width - windoww));

          PrimExpr out_idx_lower_h =
              tir::Select(pad_h_idx < kernel_height, make_const(pad_h_idx.dtype(), 0),
                          (pad_h_idx - kernel_height) / stride_height + 1);
          PrimExpr out_idx_lower_w =
              tir::Select(pad_w_idx < kernel_width, make_const(pad_w_idx.dtype(), 0),
                          (pad_w_idx - kernel_width) / stride_width + 1);

          PrimExpr divide_factor;  // number of pooled elements
          if (count_include_pad) {
            divide_factor = kernel_height * kernel_width;
          } else {
            PrimExpr h_start = out_idx[height_axis] * stride_height - pad_top;
            PrimExpr w_start = out_idx[width_axis] * stride_width - pad_left;
            PrimExpr h_end = min(h_start + kernel_height, height);
            PrimExpr w_end = min(w_start + kernel_width, width);
            h_start = max(h_start, make_const(h_start.dtype(), 0));
            w_start = max(w_start, make_const(w_start.dtype(), 0));
            divide_factor = max(h_end - h_start, make_const(h_end.dtype(), 1)) *
                            max(w_end - w_start, make_const(w_end.dtype(), 1));
          }
          return tvm::sum(
              tvm::if_then_else(tir::And(tir::And(out_idx[height_axis] >= out_idx_lower_h,
                                                  out_idx[height_axis] < out_height),
                                         tir::And(out_idx[width_axis] >= out_idx_lower_w,
                                                  out_idx[width_axis] < out_width)),
                                out_grad(out_idx) / divide_factor, make_const(out_grad->dtype, 0)),
              {windowh, windoww});
        },
        "T_pool_grad", "pool_grad_avg");
  } else {
    LOG(ERROR) << "Unrecognized pool_type: " << pool_type;
    return x;
  }
}
inline bool find_depth_height_width(const std::string& layout, int* depth_axis, int* height_axis,
                                    int* width_axis) {
  if (depth_axis) *depth_axis = -1;
  if (height_axis) *height_axis = -1;
  if (width_axis) *width_axis = -1;
  int curr_idx = 0;
  for (size_t i = 0; i < layout.size(); ++i) {
    if ((layout[i] >= 'A' && layout[i] <= 'Z') || (layout[i] >= 'a' && layout[i] <= 'z')) {
      if (layout[i] == 'D' && depth_axis) {
        if (*depth_axis != -1) return false;
        *depth_axis = curr_idx;
      } else if (layout[i] == 'H' && height_axis) {
        if (*height_axis != -1) return false;
        *height_axis = curr_idx;
      } else if (layout[i] == 'W' && width_axis) {
        if (*width_axis != -1) return false;
        *width_axis = curr_idx;
      } else if (layout[i] == 'd' || layout[i] == 'h' || layout[i] == 'w') {
        // Split sub-dimensions on depth, height, or width (e.g. NCHW16w) are unsupported.
        return false;
      }
      ++curr_idx;
    }
  }
  if ((depth_axis && *depth_axis == -1) || (height_axis && *height_axis == -1) ||
      (width_axis && *width_axis == -1))
    return false;
  return true;
}
inline bool find_height_width(const std::string& layout, int* height_axis, int* width_axis) {
  return find_depth_height_width(layout, nullptr, height_axis, width_axis);
}

inline bool find_width(const std::string& layout, int* width_axis) {
  return find_depth_height_width(layout, nullptr, nullptr, width_axis);
}
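// Editorial usage sketch (hypothetical, not part of the header): layout parsing in action.
// Digits are skipped and unrelated letters are ignored; a lower-case 'd'/'h'/'w' marks an
// unsupported split sub-dimension.
inline void example_find_axes() {
  int h = -1, w = -1;
  bool ok = find_height_width("NCHW", &h, &w);             // true; h == 2, w == 3
  bool ok_split_c = find_height_width("NCHW16c", &h, &w);  // true; 'c' is ignored
  bool bad = find_height_width("NCHW16w", &h, &w);         // false; split width axis
  (void)ok; (void)ok_split_c; (void)bad;
}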
inline Tensor pool_grad(const Tensor& out_grad, const Tensor& x,
                        const Array<PrimExpr>& kernel_size, const Array<PrimExpr>& stride_size,
                        const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode,
                        const std::string& layout = "NCHW", bool count_include_pad = true) {
  int height_axis = -1, width_axis = -1;
  ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout;
  return pool_grad_impl(out_grad, x, kernel_size, stride_size, padding_size, pool_type, ceil_mode,
                        height_axis, width_axis, count_include_pad);
}
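// Editorial usage sketch (hypothetical, not part of the header): gradient of a 2x2,
// stride-2 max pooling over an NCHW input. Tensor names and shapes are illustrative.
inline Tensor example_pool_grad() {
  Tensor x = te::placeholder({1, 3, 32, 32}, DataType::Float(32), "x");
  Tensor dy = te::placeholder({1, 3, 16, 16}, DataType::Float(32), "dy");
  // dx has the shape of x; each element receives out_grad from the windows it won.
  return pool_grad(dy, x, /*kernel_size=*/{2, 2}, /*stride_size=*/{2, 2},
                   /*padding_size=*/{0, 0, 0, 0}, kMaxPool, /*ceil_mode=*/false, "NCHW");
}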
inline PrimExpr start_index(const Var& out_index, const PrimExpr& odim, const PrimExpr& idim) {
  return indexdiv(out_index * idim, odim);
}

inline PrimExpr end_index(const Var& out_index, const PrimExpr& odim, const PrimExpr& idim) {
  PrimExpr tmp = indexdiv((out_index + 1) * idim, odim);
  return tir::Select(indexmod((out_index + 1) * idim, odim) == 0, tmp, tmp + 1);
}
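// Editorial note (not in the original header): with input extent idim = 10 and output
// extent odim = 3, start_index/end_index produce the windows [0, 4), [3, 7) and [6, 10)
// for out_index = 0, 1, 2, since start = floor(i * 10 / 3) and end = ceil((i + 1) * 10 / 3).
// Every input element is covered, so adaptive pooling never drops data.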
inline Tensor adaptive_pool_impl(const Tensor& x, const Array<PrimExpr>& output_size,
                                 PoolType pool_type, const std::vector<int>& axes) {
  const auto n_dim = output_size.size();
  ICHECK_EQ(axes.size(), n_dim) << "The number of axes does not match the in/out dimension";
  Array<PrimExpr> in_size, out_size;
  Array<PrimExpr> out_shape = x->shape;
  for (size_t i = 0; i < n_dim; ++i) {
    in_size.push_back(x->shape[axes[i]]);
    out_size.push_back(cast(DataType::Int(32), output_size[i]));
    out_shape.Set(axes[i], out_size[i]);
  }

  auto get_iter_vars = [=](const Array<Var>& output, bool reduce_indices) {
    Array<PrimExpr> indices;
    for (size_t i = 0; i < output.size(); ++i) indices.push_back(output[i]);
    Array<tir::IterVar> reduce_axes;
    for (size_t i = 0; i < n_dim; ++i) {
      auto i_start = start_index(output[axes[i]], out_size[i], in_size[i]);
      auto i_end = end_index(output[axes[i]], out_size[i], in_size[i]);
      auto rv_name = "rv" + std::to_string(i);
      auto rv_axis = tvm::te::reduce_axis(Range(i_start, i_end), rv_name);
      reduce_axes.push_back(rv_axis);
      if (reduce_indices) {
        indices.Set(axes[i], i_start + rv_axis);
      }
    }
    return std::make_tuple(indices, reduce_axes);
  };
  Map<String, ObjectRef> attrs;
  if (pool_type == kMaxPool) {
    return tvm::te::compute(
        out_shape,
        [&](const Array<Var>& output) {
          Array<PrimExpr> indices;
          Array<tir::IterVar> reduce_axes;
          std::tie(indices, reduce_axes) = get_iter_vars(output, true);
          return tvm::max(x(indices), reduce_axes);
        },
        "adaptive_pool_max", "adaptive_pool_max", attrs);
  } else if (pool_type == kAvgPool) {
    auto pool_sum = tvm::te::compute(
        out_shape,
        [&](const Array<Var>& output) {
          Array<PrimExpr> indices;
          Array<tir::IterVar> reduce_axes;
          std::tie(indices, reduce_axes) = get_iter_vars(output, true);
          return tvm::sum(x(indices), reduce_axes);
        },
        "adaptive_pool_sum", "adaptive_pool_sum");
    return tvm::te::compute(
        out_shape,
        [&](const Array<Var>& output) {
          Array<PrimExpr> indices;
          Array<tir::IterVar> reduce_axes;
          std::tie(indices, reduce_axes) = get_iter_vars(output, false);
          PrimExpr divide_factor = tvm::cast(x->dtype, 1);
          for (size_t i = 0; i < n_dim; ++i) {
            divide_factor *= tvm::cast(x->dtype, reduce_axes[i]->dom->extent);
          }
          return div(pool_sum(indices), divide_factor);
        },
        "adaptive_pool_avg", kElementWise);
  } else {
    LOG(ERROR) << "Unrecognized pool_type: " << pool_type;
    return x;
  }
}
inline Tensor adaptive_pool(const Tensor& x, const Array<PrimExpr>& output_size,
                            PoolType pool_type, const std::string& layout = "NCHW") {
  int height_axis = -1, width_axis = -1;
  ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout;
  return adaptive_pool_impl(x, output_size, pool_type, {height_axis, width_axis});
}
inline Tensor adaptive_pool3d(const Tensor& x, const Array<PrimExpr>& output_size,
                              PoolType pool_type, const std::string& layout = "NCDHW") {
  int depth_axis = -1, height_axis = -1, width_axis = -1;
  ICHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis))
      << "Unsupported layout " << layout;
  return adaptive_pool_impl(x, output_size, pool_type, {depth_axis, height_axis, width_axis});
}
inline Tensor adaptive_pool1d(const Tensor& x, const Array<PrimExpr>& output_size,
                              PoolType pool_type, const std::string& layout = "NCW") {
  int width_axis = -1;
  ICHECK(find_width(layout, &width_axis)) << "Unsupported layout " << layout;
  return adaptive_pool_impl(x, output_size, pool_type, {width_axis});
}
inline Tensor pool_impl_nd(const Tensor& x, const Array<PrimExpr>& kernel_size,
                           const Array<PrimExpr>& stride_size,
                           const Array<PrimExpr>& dilation_size,
                           const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode,
                           const std::vector<int>& axis, bool count_include_pad) {
  int k_size = kernel_size.size();
  int x_size = x->shape.size();
  ICHECK_EQ(stride_size.size(), k_size) << "Pooling stride_size must have same elements as kernel";
  ICHECK_EQ(padding_size.size(), k_size * 2)
      << "Pooling padding_size must have double the elements of kernel";
  ICHECK_EQ(axis.size(), k_size) << "axis must have same elements as kernel";
  Array<tir::IterVar> daxis;
  std::vector<PrimExpr> kernel(k_size);
  std::vector<PrimExpr> stride(k_size);
  std::vector<PrimExpr> dilation(k_size);
  std::vector<PrimExpr> pad_head(k_size);
  std::vector<PrimExpr> pad_tail(k_size);
  std::vector<PrimExpr> offset(k_size, 0);
  Array<PrimExpr> pad_before(std::vector<PrimExpr>(x_size, 0));
  Array<PrimExpr> pad_after(std::vector<PrimExpr>(x_size, 0));
  Array<PrimExpr> data_shape = x->shape;
  Array<PrimExpr> out_shape = data_shape;

  bool do_pad = false;
  for (int i = 0; i < k_size; i++) {
    int ii = axis[i];
    kernel[i] = kernel_size[i];
    stride[i] = stride_size[i];
    dilation[i] = dilation_size[i];
    pad_head[i] = padding_size[i];
    pad_tail[i] = padding_size[i + k_size];

    if (ceil_mode) {
      // Additional padding to ensure we do ceil instead of floor when dividing by stride.
      // The offset is excluded again when counting elements for the average divisor.
      offset[i] = stride[i] - 1;
      pad_tail[i] += offset[i];
    }

    const int64_t* padding0 = as_const_int(pad_head[i]);
    const int64_t* padding1 = as_const_int(pad_tail[i]);
    do_pad = do_pad || (padding0 && *padding0) || (padding1 && *padding1);
    daxis.push_back(tvm::te::reduce_axis(Range(0, kernel[i]), "rv" + std::to_string(i)));

    pad_before.Set(ii, pad_head[i]);
    pad_after.Set(ii, pad_tail[i]);

    arith::Analyzer analyzer;
    PrimExpr out_dim = analyzer.Simplify(
        indexdiv(data_shape[ii] - (kernel[i] - 1) * dilation[i] - 1 + pad_head[i] + pad_tail[i],
                 stride[i]) +
        1);
    out_shape.Set(ii, out_dim);
  }
  Map<String, ObjectRef> attrs;
  if (pool_type == kMaxPool) {
    auto temp = do_pad ? pad(x, pad_before, pad_after, tvm::min_value(x->dtype), "pad_temp") : x;
    return tvm::te::compute(
        out_shape,
        [&](const Array<Var>& output) {
          Array<PrimExpr> indices;
          for (const Var& var : output) indices.push_back(var);
          for (int i = 0; i < k_size; i++) {
            int ii = axis[i];
            indices.Set(ii, output[ii] * stride[i] + daxis[i] * dilation[i]);
          }
          return tvm::max(temp(indices), daxis);
        },
        "pool_max", "pool_max", attrs);
  } else if (pool_type == kAvgPool) {
    // Pad with zeros so padded elements do not contribute to the window sum.
    auto temp = do_pad ? pad(x, pad_before, pad_after, 0, "pad_temp") : x;
    auto pool_sum = tvm::te::compute(
        out_shape,
        [&](const Array<Var>& output) {
          Array<PrimExpr> indices;
          for (const Var& var : output) indices.push_back(var);
          for (int i = 0; i < k_size; i++) {
            int ii = axis[i];
            indices.Set(ii, output[ii] * stride[i] + daxis[i] * dilation[i]);
          }
          return tvm::sum(temp(indices), daxis);
        },
        "pool_sum", "pool_sum");
    // Divide each window sum by the (possibly clipped) number of pooled elements.
    return tvm::te::compute(
        out_shape,
        [&](const Array<Var>& output) {
          Array<PrimExpr> indices;
          for (const Var& var : output) indices.push_back(var);
          if (count_include_pad) {
            std::vector<PrimExpr> start(k_size);
            std::vector<PrimExpr> end(k_size);
            auto num_el = make_const(DataType::Int(32), 1);
            for (int i = 0; i < k_size; i++) {
              int ii = axis[i];
              start[i] = output[ii] * stride[i] - pad_head[i];
              // Exclude the ceil_mode offset from the tail when counting elements,
              // since it was added only to round the output size up.
              end[i] = start[i] + (kernel[i] - 1) * dilation[i];
              end[i] = min(end[i], data_shape[ii] + pad_tail[i] - 1 - offset[i]);
              num_el *= (end[i] - start[i]) / dilation[i] + 1;
            }
            return div(pool_sum(indices), num_el);
          } else {
            std::vector<PrimExpr> start(k_size);
            std::vector<PrimExpr> end(k_size);
            auto num_el = make_const(DataType::Int(32), 1);
            for (int i = 0; i < k_size; i++) {
              int ii = axis[i];
              start[i] = output[ii] * stride[i] - pad_head[i];
              end[i] = start[i] + (kernel[i] - 1) * dilation[i];
              // Step past leading padded positions by whole dilation jumps, then clip
              // the window tail to the last valid input element.
              PrimExpr jumps_to_non_pad = (dilation[i] - 1 - start[i]) / dilation[i];
              jumps_to_non_pad = max(jumps_to_non_pad, make_const(jumps_to_non_pad.dtype(), 0));
              end[i] = min(end[i], data_shape[ii] - 1);
              num_el *= (end[i] - (start[i] + dilation[i] * jumps_to_non_pad)) / dilation[i] + 1;
            }
            PrimExpr divide_factor = max(num_el, make_const(num_el.dtype(), 1));
            return div(pool_sum(indices), divide_factor);
          }
        },
        "pool_avg", kElementWise);
  } else {
    LOG(ERROR) << "Unrecognized pool_type: " << pool_type;
    return x;
  }
}
inline Tensor pool1d(const Tensor& x, const Array<PrimExpr>& kernel_size,
                     const Array<PrimExpr>& stride_size, const Array<PrimExpr>& dilation_size,
                     const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode,
                     const std::string& layout = "NCW", bool count_include_pad = true) {
  int width_axis = -1;
  ICHECK(find_width(layout, &width_axis)) << "Unsupported layout " << layout;
  std::vector<int> axis = {width_axis};
  return pool_impl_nd(x, kernel_size, stride_size, dilation_size, padding_size, pool_type,
                      ceil_mode, axis, count_include_pad);
}
inline Tensor pool2d(const Tensor& x, const Array<PrimExpr>& kernel_size,
                     const Array<PrimExpr>& stride_size, const Array<PrimExpr>& dilation_size,
                     const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode,
                     const std::string& layout = "NCHW", bool count_include_pad = true) {
  int height_axis = -1, width_axis = -1;
  ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout;
  std::vector<int> axis = {height_axis, width_axis};
  return pool_impl_nd(x, kernel_size, stride_size, dilation_size, padding_size, pool_type,
                      ceil_mode, axis, count_include_pad);
}
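// Editorial usage sketch (hypothetical, not part of the header): 3x3 average pooling with
// stride 2 and symmetric padding 1 in NCHW layout; shapes and names are illustrative.
inline Tensor example_avg_pool2d() {
  Tensor data = te::placeholder({1, 16, 28, 28}, DataType::Float(32), "data");
  // Output extent per spatial axis: (28 - 3 + 1 + 1) / 2 + 1 = 14 -> shape {1, 16, 14, 14}.
  return pool2d(data, /*kernel_size=*/{3, 3}, /*stride_size=*/{2, 2},
                /*dilation_size=*/{1, 1}, /*padding_size=*/{1, 1, 1, 1}, kAvgPool,
                /*ceil_mode=*/false, "NCHW", /*count_include_pad=*/false);
}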
inline Tensor pool3d(const Tensor& x, const Array<PrimExpr>& kernel_size,
                     const Array<PrimExpr>& stride_size, const Array<PrimExpr>& dilation_size,
                     const Array<PrimExpr>& padding_size, PoolType pool_type, bool ceil_mode,
                     const std::string& layout = "NCDHW", bool count_include_pad = true) {
  int depth_axis = -1, height_axis = -1, width_axis = -1;
  ICHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis))
      << "Unsupported layout " << layout;
  std::vector<int> axis = {depth_axis, height_axis, width_axis};
  return pool_impl_nd(x, kernel_size, stride_size, dilation_size, padding_size, pool_type,
                      ceil_mode, axis, count_include_pad);
}
// Public API summary (condensed from the generated documentation):
//   pool_grad(out_grad, x, kernel_size, stride_size, padding_size, pool_type, ceil_mode,
//             layout = "NCHW", count_include_pad = true)
//       Gradient of 2-D pooling; height/width axes are determined by the layout string.
//   adaptive_pool(x, output_size, pool_type, layout = "NCHW")
//       Adaptive pooling on the height and width dimensions; kernel and stride are derived
//       from the requested output size.
//   adaptive_pool1d(x, output_size, pool_type, layout = "NCW")
//   adaptive_pool3d(x, output_size, pool_type, layout = "NCDHW")
//       One- and three-dimensional variants of adaptive_pool.
//   global_pool(x, pool_type, layout = "NCHW")
//       Global pooling over the full height and width extents.
//   pool1d / pool2d / pool3d(x, kernel_size, stride_size, dilation_size, padding_size,
//                            pool_type, ceil_mode, layout, count_include_pad = true)
//       Explicit-window pooling over one, two, or three spatial dimensions; axes are found
//       from the layout string.
//   find_depth_height_width / find_height_width / find_width(layout, ...)
//       Locate the D/H/W axes in a layout string; return false on unsupported layouts.

#endif  // TVM_TOPI_NN_POOLING_H_