tvm/relax/attrs/nn.h — attribute definitions for Relax neural-network operators (extracted from the Doxygen rendering of this file).
1 /*
2  * Licensed to the Apache Software Foundation (ASF) under one
3  * or more contributor license agreements. See the NOTICE file
4  * distributed with this work for additional information
5  * regarding copyright ownership. The ASF licenses this file
6  * to you under the Apache License, Version 2.0 (the
7  * "License"); you may not use this file except in compliance
8  * with the License. You may obtain a copy of the License at
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing,
13  * software distributed under the License is distributed on an
14  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15  * KIND, either express or implied. See the License for the
16  * specific language governing permissions and limitations
17  * under the License.
18  */
19 
24 #ifndef TVM_RELAX_ATTRS_NN_H_
25 #define TVM_RELAX_ATTRS_NN_H_
26 
27 #include <tvm/relax/expr.h>
28 
29 namespace tvm {
30 namespace relax {
31 
33 struct Conv1DAttrs : public AttrsNodeReflAdapter<Conv1DAttrs> {
34  ffi::Array<int64_t> strides;
35  ffi::Array<int64_t> padding;
36  ffi::Array<int64_t> dilation;
37  int groups;
38  ffi::String data_layout;
39  ffi::String kernel_layout;
40  ffi::String out_layout;
42 
43  static void RegisterReflection() {
44  namespace refl = tvm::ffi::reflection;
45  refl::ObjectDef<Conv1DAttrs>()
46  .def_ro("strides", &Conv1DAttrs::strides, "Specifies the strides of the convolution.")
47  .def_ro("padding", &Conv1DAttrs::padding,
48  "If padding is non-zero, then the input is implicitly zero-padded"
49  "Padding support both symmetric and asymmetric as"
50  "one int : same padding used on both sides"
51  "two int : padding width in the order of (left, right)")
52  .def_ro("dilation", &Conv1DAttrs::dilation,
53  "Specifies the dilation rate to use for dilated convolution.")
54  .def_ro("groups", &Conv1DAttrs::groups,
55  "Number of groups to split the input into for grouped convolution. The number of "
56  "input and "
57  "output channels should be divisible by the number of groups.")
58  .def_ro("data_layout", &Conv1DAttrs::data_layout,
59  "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
60  "'N', 'C', 'W' stands for batch, channel, width"
61  "dimensions respectively. Convolution is applied on the 'W' dimensions.")
62  .def_ro("kernel_layout", &Conv1DAttrs::kernel_layout,
63  "Dimension ordering of weight. Can be 'OIW', 'IOW', etc."
64  "'O', 'I', 'W' stands for num_filter, input_channel, and width"
65  "dimensions respectively.")
66  .def_ro("out_layout", &Conv1DAttrs::out_layout,
67  "Dimension ordering of output. Can be 'NCW', 'NWC', etc."
68  "'N', 'C', 'W' stands for batch, channel, and width"
69  "dimensions respectively. Default to be same as input layout.")
70  .def_ro("out_dtype", &Conv1DAttrs::out_dtype,
71  "Output data type, set to explicit type under mixed precision setting");
72  }
74 }; // struct Conv1dAttrs
75 
77 struct Conv2DAttrs : public AttrsNodeReflAdapter<Conv2DAttrs> {
78  ffi::Array<int64_t> strides;
79  ffi::Array<int64_t> padding;
80  ffi::Array<int64_t> dilation;
81  int groups;
82  ffi::String data_layout;
83  ffi::String kernel_layout;
84  ffi::String out_layout;
86 
87  static void RegisterReflection() {
88  namespace refl = tvm::ffi::reflection;
89  refl::ObjectDef<Conv2DAttrs>()
90  .def_ro("strides", &Conv2DAttrs::strides, "Specifies the strides of the convolution.")
91  .def_ro("padding", &Conv2DAttrs::padding,
92  "If padding is non-zero, then the input is implicitly zero-padded"
93  "Padding support both symmetric and asymmetric as"
94  "one int : same padding used on all sides"
95  "two int : bottom, right will use same padding as top, left"
96  "four int : padding width in the order of (top, left, bottom, right)")
97  .def_ro("dilation", &Conv2DAttrs::dilation,
98  "Specifies the dilation rate to use for dilated convolution.")
99  .def_ro("groups", &Conv2DAttrs::groups,
100  "Number of groups to split the input into for grouped convolution. The number of "
101  "input and "
102  "output channels should be divisible by the number of groups.")
103  .def_ro("data_layout", &Conv2DAttrs::data_layout,
104  "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
105  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
106  "dimensions respectively. Convolution is applied on the 'H' and"
107  "'W' dimensions.")
108  .def_ro("kernel_layout", &Conv2DAttrs::kernel_layout,
109  "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc."
110  "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
111  "dimensions respectively.")
112  .def_ro("out_layout", &Conv2DAttrs::out_layout,
113  "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc."
114  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
115  "dimensions respectively. Default to be same as input layout.")
116  .def_ro("out_dtype", &Conv2DAttrs::out_dtype,
117  "Output data type, set to explicit type under mixed precision setting");
118  }
120 }; // struct Conv2dAttrs
121 
123 struct Conv3DAttrs : public AttrsNodeReflAdapter<Conv3DAttrs> {
124  ffi::Array<int64_t> strides;
125  ffi::Array<int64_t> padding;
126  ffi::Array<int64_t> dilation;
127  int groups;
128  ffi::String data_layout;
129  ffi::String kernel_layout;
130  ffi::String out_layout;
132 
133  static void RegisterReflection() {
134  namespace refl = tvm::ffi::reflection;
135  refl::ObjectDef<Conv3DAttrs>()
136  .def_ro("strides", &Conv3DAttrs::strides, "Specifies the strides of the convolution.")
137  .def_ro(
138  "padding", &Conv3DAttrs::padding,
139  "If padding is non-zero, then the input is implicitly zero-padded"
140  "Padding support both symmetric and asymmetric as"
141  "one int : same padding used on all sides"
142  "two int : bottom, right will use same padding as top, left"
143  "four int : padding width in the order of (forward, back, top, left, bottom, right)")
144  .def_ro("dilation", &Conv3DAttrs::dilation,
145  "Specifies the dilation rate to use for dilated convolution.")
146  .def_ro("groups", &Conv3DAttrs::groups,
147  "Number of groups to split the input into for grouped convolution. The number of "
148  "input and "
149  "output channels should be divisible by the number of groups.")
150  .def_ro("data_layout", &Conv3DAttrs::data_layout,
151  "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
152  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
153  "dimensions respectively. Convolution is applied on the 'D', 'H', and"
154  "'W' dimensions.")
155  .def_ro(
156  "kernel_layout", &Conv3DAttrs::kernel_layout,
157  "Dimension ordering of weight. Can be 'OIDHW', 'OIDHW16o16i', etc."
158  "'O', 'I', 'D', 'H', 'W' stands for num_filter, input_channel, depth, height, and width"
159  "dimensions respectively.")
160  .def_ro("out_layout", &Conv3DAttrs::out_layout,
161  "Dimension ordering of output. Can be 'NCDHW', 'NDHWC', etc."
162  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
163  "dimensions respectively. Default to be same as input layout.")
164  .def_ro("out_dtype", &Conv3DAttrs::out_dtype,
165  "Output data type, set to explicit type under mixed precision setting");
166  }
168 }; // struct Conv3dAttrs
169 
171 struct Conv1DTransposeAttrs : public AttrsNodeReflAdapter<Conv1DTransposeAttrs> {
172  ffi::Array<int64_t> strides;
173  ffi::Array<int64_t> padding;
174  ffi::Array<int64_t> output_padding;
175  ffi::Array<int64_t> dilation;
176  int groups;
177  ffi::String data_layout;
178  ffi::String kernel_layout;
179  ffi::String out_layout;
181 
182  static void RegisterReflection() {
183  namespace refl = tvm::ffi::reflection;
184  refl::ObjectDef<Conv1DTransposeAttrs>()
185  .def_ro("strides", &Conv1DTransposeAttrs::strides,
186  "Specifies the strides of the convolution.")
187  .def_ro("padding", &Conv1DTransposeAttrs::padding,
188  "If padding is non-zero, then the input is implicitly zero-padded"
189  "Padding support both symmetric and asymmetric as"
190  "one int : same padding used on both sides"
191  "two int : padding width in the order of (left, right)")
192  .def_ro("output_padding", &Conv1DTransposeAttrs::output_padding,
193  "Used to disambiguate the output shape.")
194  .def_ro("dilation", &Conv1DTransposeAttrs::dilation,
195  "Specifies the dilation rate to use for dilated convolution.")
196  .def_ro("groups", &Conv1DTransposeAttrs::groups,
197  "Number of groups to split the input into for grouped convolution. The number of "
198  "input and "
199  "output channels should be divisible by the number of groups.")
200  .def_ro("data_layout", &Conv1DTransposeAttrs::data_layout,
201  "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
202  "'N', 'C', 'W' stands for batch, channel, width"
203  "dimensions respectively. Convolution is applied on the 'W' dimensions.")
204  .def_ro("kernel_layout", &Conv1DTransposeAttrs::kernel_layout,
205  "Dimension ordering of weight. Can be 'OIW', 'IOW', etc."
206  "'O', 'I', 'W' stands for num_filter, input_channel, and width"
207  "dimensions respectively.")
208  .def_ro("out_layout", &Conv1DTransposeAttrs::out_layout,
209  "Dimension ordering of output. Can be 'NCW', 'NWC', etc."
210  "'N', 'C', 'W' stands for batch, channel, and width"
211  "dimensions respectively. Default to be same as input layout.")
212  .def_ro("out_dtype", &Conv1DTransposeAttrs::out_dtype,
213  "Output data type, set to explicit type under mixed precision setting");
214  }
215  TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Conv1DTransposeAttrs", Conv1DTransposeAttrs,
216  BaseAttrsNode);
217 }; // struct Conv1DTransposeAttrs
218 
220 struct Conv2DTransposeAttrs : public AttrsNodeReflAdapter<Conv2DTransposeAttrs> {
221  ffi::Array<int64_t> strides;
222  ffi::Array<int64_t> padding;
223  ffi::Array<int64_t> output_padding;
224  ffi::Array<int64_t> dilation;
225  int groups;
226  ffi::String data_layout;
227  ffi::String kernel_layout;
228  ffi::String out_layout;
230 
231  static void RegisterReflection() {
232  namespace refl = tvm::ffi::reflection;
233  refl::ObjectDef<Conv2DTransposeAttrs>()
234  .def_ro("strides", &Conv2DTransposeAttrs::strides,
235  "Specifies the strides of the convolution.")
236  .def_ro("padding", &Conv2DTransposeAttrs::padding,
237  "If padding is non-zero, then the input is implicitly zero-padded"
238  "Padding support both symmetric and asymmetric as"
239  "one int : same padding used on all sides"
240  "two int : bottom, right will use same padding as top, left"
241  "four int : padding width in the order of (top, left, bottom, right)")
242  .def_ro("output_padding", &Conv2DTransposeAttrs::output_padding,
243  "Used to disambiguate the output shape.")
244  .def_ro("dilation", &Conv2DTransposeAttrs::dilation,
245  "Specifies the dilation rate to use for dilated convolution.")
246  .def_ro("groups", &Conv2DTransposeAttrs::groups,
247  "Number of groups to split the input into for grouped convolution. The number of "
248  "input and "
249  "output channels should be divisible by the number of groups.")
250  .def_ro("data_layout", &Conv2DTransposeAttrs::data_layout,
251  "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
252  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
253  "dimensions respectively. Convolution is applied on the 'H' and"
254  "'W' dimensions.")
255  .def_ro("kernel_layout", &Conv2DTransposeAttrs::kernel_layout,
256  "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc."
257  "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
258  "dimensions respectively.")
259  .def_ro("out_layout", &Conv2DTransposeAttrs::out_layout,
260  "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc."
261  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
262  "dimensions respectively. Default to be same as input layout.")
263  .def_ro("out_dtype", &Conv2DTransposeAttrs::out_dtype,
264  "Output data type, set to explicit type under mixed precision setting");
265  }
266  TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Conv2DTransposeAttrs", Conv2DTransposeAttrs,
267  BaseAttrsNode);
268 }; // struct Conv2DTransposeAttrs
269 
271 struct Conv3DTransposeAttrs : public AttrsNodeReflAdapter<Conv3DTransposeAttrs> {
272  ffi::Array<int64_t> strides;
273  ffi::Array<int64_t> padding;
274  ffi::Array<int64_t> output_padding;
275  ffi::Array<int64_t> dilation;
276  int groups;
277  ffi::String data_layout;
278  ffi::String kernel_layout;
279  ffi::String out_layout;
281 
282  static void RegisterReflection() {
283  namespace refl = tvm::ffi::reflection;
284  refl::ObjectDef<Conv3DTransposeAttrs>()
285  .def_ro("strides", &Conv3DTransposeAttrs::strides,
286  "Specifies the strides of the convolution.")
287  .def_ro("padding", &Conv3DTransposeAttrs::padding,
288  "If padding is non-zero, then the input is implicitly zero-padded"
289  "Padding support both symmetric and asymmetric as"
290  "one int : same padding used on all sides"
291  "three int : back/bottom/right will use same padding as front/top/left"
292  "six int : padding width in the order of (front, top, left, back, bottom, right)")
293  .def_ro("output_padding", &Conv3DTransposeAttrs::output_padding,
294  "Used to disambiguate the output shape.")
295  .def_ro("dilation", &Conv3DTransposeAttrs::dilation,
296  "Specifies the dilation rate to use for dilated convolution.")
297  .def_ro("groups", &Conv3DTransposeAttrs::groups,
298  "Number of groups to split the input into for grouped convolution. The number of "
299  "input and "
300  "output channels should be divisible by the number of groups.")
301  .def_ro("data_layout", &Conv3DTransposeAttrs::data_layout,
302  "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
303  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
304  "dimensions respectively. Convolution is applied on the 'D', 'H', and"
305  "'W' dimensions.")
306  .def_ro("kernel_layout", &Conv3DTransposeAttrs::kernel_layout,
307  "Dimension ordering of weight. Can be 'IODHW', etc."
308  "'I', 'O', 'D', 'H', 'W' stands for input_channel, output_channel, depth, height, and "
309  "width"
310  "dimensions respectively.")
311  .def_ro("out_layout", &Conv3DTransposeAttrs::out_layout,
312  "Dimension ordering of output. Can be 'NCDHW', 'NDHWC', etc."
313  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
314  "dimensions respectively. Default to be same as input layout.")
315  .def_ro("out_dtype", &Conv3DTransposeAttrs::out_dtype,
316  "Output data type, set to explicit type under mixed precision setting");
317  }
318  TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Conv3DTransposeAttrs", Conv3DTransposeAttrs,
319  BaseAttrsNode);
320 }; // struct Conv3DTransposeAttrs
321 
323 struct Pool1DAttrs : public AttrsNodeReflAdapter<Pool1DAttrs> {
324  ffi::Array<int64_t> pool_size;
325  ffi::Array<int64_t> strides;
326  ffi::Array<int64_t> padding;
327  ffi::Array<int64_t> dilation;
328  bool ceil_mode;
330  ffi::String layout;
331  ffi::String out_layout;
332 
333  static void RegisterReflection() {
334  namespace refl = tvm::ffi::reflection;
335  refl::ObjectDef<Pool1DAttrs>()
336  .def_ro("pool_size", &Pool1DAttrs::pool_size, "Size of the pooling windows.")
337  .def_ro("strides", &Pool1DAttrs::strides, "Specifies the strides of the convolution.")
338  .def_ro("dilation", &Pool1DAttrs::dilation, "Specifies the dilation of the convolution.")
339  .def_ro("padding", &Pool1DAttrs::padding,
340  "If padding is non-zero, then the input is implicitly zero-padded"
341  "Padding support both symmetric and asymmetric as"
342  "one int : same padding used on all sides"
343  "two int : padding width in the order of (left, right)")
344  .def_ro(
345  "ceil_mode", &Pool1DAttrs::ceil_mode,
346  "A boolean indicating if use ceil or floor to compute the output shape. By using ceil, "
347  "every element in the input tensor will be covered by a sliding window.")
348  .def_ro("count_include_pad", &Pool1DAttrs::count_include_pad,
349  "When true, will include padding to compute the average")
350  .def_ro("layout", &Pool1DAttrs::layout,
351  "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
352  "'N', 'C', 'W' stands for batch, channel, and width"
353  "dimensions respectively. Pooling is applied on the 'W' dimensions.",
354  refl::DefaultValue("NCW"))
355  .def_ro("out_layout", &Pool1DAttrs::out_layout,
356  "Dimension ordering of output data. Can be 'NCW', 'NWC', etc."
357  "'N', 'C', 'W' stands for batch, channel, and width"
358  "dimensions respectively. Pooling is applied on the 'W' dimensions.");
359  }
361 }; // struct Pool1dAttrs
362 
364 struct Pool2DAttrs : public AttrsNodeReflAdapter<Pool2DAttrs> {
365  ffi::Array<int64_t> pool_size;
366  ffi::Array<int64_t> strides;
367  ffi::Array<int64_t> padding;
368  ffi::Array<int64_t> dilation;
369  bool ceil_mode;
371  ffi::String layout;
372  ffi::String out_layout;
373 
374  static void RegisterReflection() {
375  namespace refl = tvm::ffi::reflection;
376  refl::ObjectDef<Pool2DAttrs>()
377  .def_ro("pool_size", &Pool2DAttrs::pool_size, "Size of the pooling windows.")
378  .def_ro("strides", &Pool2DAttrs::strides, "Specifies the strides of the convolution.")
379  .def_ro("dilation", &Pool2DAttrs::dilation, "Specifies the dilation of the convolution.")
380  .def_ro("padding", &Pool2DAttrs::padding,
381  "If padding is non-zero, then the input is implicitly zero-padded"
382  "Padding support both symmetric and asymmetric as"
383  "one int : same padding used on all sides"
384  "two int : bottom, right will use same padding as top, left"
385  "four int : padding width in the order of (top, left, bottom, right)")
386  .def_ro(
387  "ceil_mode", &Pool2DAttrs::ceil_mode,
388  "A boolean indicating if use ceil or floor to compute the output shape. By using ceil, "
389  "every element in the input tensor will be covered by a sliding window.")
390  .def_ro("count_include_pad", &Pool2DAttrs::count_include_pad,
391  "When true, will include padding to compute the average")
392  .def_ro("layout", &Pool2DAttrs::layout,
393  "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
394  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
395  "dimensions respectively. Pooling is applied on the 'H' and"
396  "'W' dimensions.")
397  .def_ro("out_layout", &Pool2DAttrs::out_layout,
398  "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
399  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
400  "dimensions respectively. Pooling is applied on the 'H' and"
401  "'W' dimensions.");
402  }
404 }; // struct Pool2dAttrs
405 
407 struct Pool3DAttrs : public AttrsNodeReflAdapter<Pool3DAttrs> {
408  ffi::Array<int64_t> pool_size;
409  ffi::Array<int64_t> strides;
410  ffi::Array<int64_t> padding;
411  ffi::Array<int64_t> dilation;
412  bool ceil_mode;
414  ffi::String layout;
415  ffi::String out_layout;
416 
417  static void RegisterReflection() {
418  namespace refl = tvm::ffi::reflection;
419  refl::ObjectDef<Pool3DAttrs>()
420  .def_ro("pool_size", &Pool3DAttrs::pool_size, "Size of the pooling windows.")
421  .def_ro("strides", &Pool3DAttrs::strides, "Specifies the strides of the convolution.")
422  .def_ro("dilation", &Pool3DAttrs::dilation, "Specifies the dilation of the convolution.")
423  .def_ro("padding", &Pool3DAttrs::padding,
424  "If padding is non-zero, then the input is implicitly zero-padded"
425  "Padding support both symmetric and asymmetric as"
426  "one int : same padding used on all sides"
427  "three int : back, bottom, right will use same padding as front, top, left"
428  "four int : padding width in the order of (front, top, left, back, bottom, right)")
429  .def_ro(
430  "ceil_mode", &Pool3DAttrs::ceil_mode,
431  "A boolean indicating if use ceil or floor to compute the output shape. By using ceil, "
432  "every element in the input tensor will be covered by a sliding window.")
433  .def_ro("count_include_pad", &Pool3DAttrs::count_include_pad,
434  "When true, will include padding to compute the average")
435  .def_ro("layout", &Pool3DAttrs::layout,
436  "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
437  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
438  "dimensions respectively. Pooling is applied on the 'D', 'H' and"
439  "'W' dimensions.")
440  .def_ro("out_layout", &Pool3DAttrs::out_layout,
441  "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc."
442  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
443  "dimensions respectively. Pooling is applied on the 'D', 'H' and"
444  "'W' dimensions.");
445  }
447 }; // struct Pool3dAttrs
448 
450 struct AdaptivePool1DAttrs : public AttrsNodeReflAdapter<AdaptivePool1DAttrs> {
451  ffi::Optional<ffi::Array<int64_t>> output_size;
452  ffi::String layout;
453  ffi::String out_layout;
454 
455  static void RegisterReflection() {
456  namespace refl = tvm::ffi::reflection;
457  refl::ObjectDef<AdaptivePool1DAttrs>()
458  .def_ro("output_size", &AdaptivePool1DAttrs::output_size, "Output width.")
459  .def_ro("layout", &AdaptivePool1DAttrs::layout,
460  "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
461  "'N', 'C', 'W' stands for batch, channel and width"
462  "dimensions respectively. Pooling is applied on the"
463  "'W' dimensions.")
464  .def_ro("out_layout", &AdaptivePool1DAttrs::out_layout,
465  "Dimension ordering of output data. Can be 'NCW', 'NWC', etc."
466  "'N', 'C', 'W' stands for batch, channel and width"
467  "dimensions respectively. Pooling is applied on the"
468  "'W' dimensions.");
469  }
470  TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.AdaptivePool1DAttrs", AdaptivePool1DAttrs,
471  BaseAttrsNode);
472 }; // struct AdaptivePool1DAttrs
473 
475 struct AdaptivePool2DAttrs : public AttrsNodeReflAdapter<AdaptivePool2DAttrs> {
476  ffi::Optional<ffi::Array<int64_t>> output_size;
477  ffi::String layout;
478  ffi::String out_layout;
479 
480  static void RegisterReflection() {
481  namespace refl = tvm::ffi::reflection;
482  refl::ObjectDef<AdaptivePool2DAttrs>()
483  .def_ro("output_size", &AdaptivePool2DAttrs::output_size, "Output height and width.")
484  .def_ro("layout", &AdaptivePool2DAttrs::layout,
485  "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
486  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
487  "dimensions respectively. Pooling is applied on the 'H' and"
488  "'W' dimensions.")
489  .def_ro("out_layout", &AdaptivePool2DAttrs::out_layout,
490  "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
491  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
492  "dimensions respectively. Pooling is applied on the 'H' and"
493  "'W' dimensions.");
494  }
495  TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.AdaptivePool2DAttrs", AdaptivePool2DAttrs,
496  BaseAttrsNode);
497 }; // struct AdaptivePool2DAttrs
498 
500 struct AdaptivePool3DAttrs : public AttrsNodeReflAdapter<AdaptivePool3DAttrs> {
501  ffi::Optional<ffi::Array<int64_t>> output_size;
502  ffi::String layout;
503  ffi::String out_layout;
504 
505  static void RegisterReflection() {
506  namespace refl = tvm::ffi::reflection;
507  refl::ObjectDef<AdaptivePool3DAttrs>()
508  .def_ro("output_size", &AdaptivePool3DAttrs::output_size, "Output depth, height and width.")
509  .def_ro("layout", &AdaptivePool3DAttrs::layout,
510  "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
511  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
512  "dimensions respectively. Pooling is applied on 'D', 'H' and"
513  "'W' dimensions.")
514  .def_ro("out_layout", &AdaptivePool3DAttrs::out_layout,
515  "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc."
516  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
517  "dimensions respectively. Pooling is applied on 'D', 'H' and"
518  "'W' dimensions.");
519  }
520  TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.AdaptivePool3DAttrs", AdaptivePool3DAttrs,
521  BaseAttrsNode);
522 }; // struct AdaptivePool3DAttrs
523 
525 struct SoftmaxAttrs : public AttrsNodeReflAdapter<SoftmaxAttrs> {
526  int axis;
527 
528  static void RegisterReflection() {
529  namespace refl = tvm::ffi::reflection;
530  refl::ObjectDef<SoftmaxAttrs>().def_ro("axis", &SoftmaxAttrs::axis,
531  "The axis to sum over when computing softmax.");
532  }
534 };
535 
537 struct LeakyReluAttrs : public AttrsNodeReflAdapter<LeakyReluAttrs> {
538  double alpha;
539 
540  static void RegisterReflection() {
541  namespace refl = tvm::ffi::reflection;
542  refl::ObjectDef<LeakyReluAttrs>().def_ro("alpha", &LeakyReluAttrs::alpha,
543  "The slope of the negative part.");
544  }
546 };
547 
549 struct SoftplusAttrs : public AttrsNodeReflAdapter<SoftplusAttrs> {
550  double beta;
551  double threshold;
552 
553  static void RegisterReflection() {
554  namespace refl = tvm::ffi::reflection;
555  refl::ObjectDef<SoftplusAttrs>()
556  .def_ro("beta", &SoftplusAttrs::beta,
557  "Scaling factor controlling the sharpness of the Softplus transition.")
558  .def_ro("threshold", &SoftplusAttrs::threshold,
559  "Value determining when to use linear approximation for numerical stability.");
560  }
562 };
563 
565 struct PReluAttrs : public AttrsNodeReflAdapter<PReluAttrs> {
566  int axis;
567 
568  static void RegisterReflection() {
569  namespace refl = tvm::ffi::reflection;
570  refl::ObjectDef<PReluAttrs>().def_ro("axis", &PReluAttrs::axis,
571  "The axis along which the alpha values are applied.");
572  }
574 };
575 
577 struct BatchNormAttrs : public AttrsNodeReflAdapter<BatchNormAttrs> {
578  int axis;
579  double epsilon;
580  bool center;
581  bool scale;
582  double momentum;
583  bool training;
584 
585  static void RegisterReflection() {
586  namespace refl = tvm::ffi::reflection;
587  refl::ObjectDef<BatchNormAttrs>()
588  .def_ro("axis", &BatchNormAttrs::axis, "The axis along which the normalization is applied.")
589  .def_ro("epsilon", &BatchNormAttrs::epsilon,
590  "Small float added to variance to avoid dividing by zero")
591  .def_ro("center", &BatchNormAttrs::center,
592  "Indicating if the beta offset will be added to the normalized tensor.")
593  .def_ro("scale", &BatchNormAttrs::scale,
594  "Indicating if the gamma scale will be multiplied.")
595  .def_ro("momentum", &BatchNormAttrs::momentum,
596  "The value used for the moving_mean and moving_var update.")
597  .def_ro("training", &BatchNormAttrs::training,
598  "Whether we are training (i.e., not in eval mode).");
599  }
601 }; // struct BatchNormAttrs
602 
604 struct LayerNormAttrs : public AttrsNodeReflAdapter<LayerNormAttrs> {
605  ffi::Array<Integer> axes;
606  double epsilon;
607  bool center;
608  bool scale;
609 
610  static void RegisterReflection() {
611  namespace refl = tvm::ffi::reflection;
612  refl::ObjectDef<LayerNormAttrs>()
613  .def_ro("axes", &LayerNormAttrs::axes,
614  "The axes that along which the normalization is applied.")
615  .def_ro("epsilon", &LayerNormAttrs::epsilon,
616  "Small float added to variance to avoid dividing by zero")
617  .def_ro("center", &LayerNormAttrs::center,
618  "Indicating if the beta offset will be added to the normalized tensor.")
619  .def_ro("scale", &LayerNormAttrs::scale,
620  "Indicating if the gamma scale will be multiplied.");
621  }
623 }; // struct LayerNormAttrs
624 
626 struct GroupNormAttrs : public AttrsNodeReflAdapter<GroupNormAttrs> {
629  ffi::Array<Integer> axes;
630  double epsilon;
631  bool center;
632  bool scale;
633 
634  static void RegisterReflection() {
635  namespace refl = tvm::ffi::reflection;
636  refl::ObjectDef<GroupNormAttrs>()
637  .def_ro("num_groups", &GroupNormAttrs::num_groups,
638  "The number of groups to separate the channels into.")
639  .def_ro("channel_axis", &GroupNormAttrs::channel_axis,
640  "The axis that represents the channel.")
641  .def_ro(
642  "axes", &GroupNormAttrs::axes,
643  "The axes that along which the normalization is applied (excluding the channel axis).")
644  .def_ro("epsilon", &GroupNormAttrs::epsilon,
645  "Small float added to variance to avoid dividing by zero")
646  .def_ro("center", &GroupNormAttrs::center,
647  "Indicating if the beta offset will be added to the normalized tensor.")
648  .def_ro("scale", &GroupNormAttrs::scale,
649  "Indicating if the gamma scale will be multiplied.");
650  }
652 }; // struct GroupNormAttrs
653 
655 struct InstanceNormAttrs : public AttrsNodeReflAdapter<InstanceNormAttrs> {
657  ffi::Array<Integer> axes;
658  double epsilon;
659  bool center;
660  bool scale;
661 
662  static void RegisterReflection() {
663  namespace refl = tvm::ffi::reflection;
664  refl::ObjectDef<InstanceNormAttrs>()
665  .def_ro("channel_axis", &InstanceNormAttrs::channel_axis,
666  "The axis that represents the channel.")
667  .def_ro("axes", &InstanceNormAttrs::axes,
668  "The axes that along which the normalization is applied.")
669  .def_ro("epsilon", &InstanceNormAttrs::epsilon,
670  "Small float added to variance to avoid dividing by zero")
671  .def_ro("center", &InstanceNormAttrs::center,
672  "Indicating if the beta offset will be added to the normalized tensor.")
673  .def_ro("scale", &InstanceNormAttrs::scale,
674  "Indicating if the gamma scale will be multiplied.");
675  }
676  TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.InstanceNormAttrs", InstanceNormAttrs,
677  BaseAttrsNode);
678 }; // struct InstanceNormAttrs
679 
681 struct RMSNormAttrs : public AttrsNodeReflAdapter<RMSNormAttrs> {
682  ffi::Array<Integer> axes;
683  double epsilon;
684 
685  static void RegisterReflection() {
686  namespace refl = tvm::ffi::reflection;
687  refl::ObjectDef<RMSNormAttrs>()
688  .def_ro("axes", &RMSNormAttrs::axes,
689  "The axes that along which the normalization is applied.")
690  .def_ro("epsilon", &RMSNormAttrs::epsilon,
691  "Small float added to variance to avoid dividing by zero");
692  }
694 }; // struct RMSNormAttrs
695 
697 struct NLLLossAttrs : public AttrsNodeReflAdapter<NLLLossAttrs> {
698  ffi::String reduction;
700 
701  static void RegisterReflection() {
702  namespace refl = tvm::ffi::reflection;
703  refl::ObjectDef<NLLLossAttrs>()
704  .def_ro("reduction", &NLLLossAttrs::reduction,
705  "The reduction method to apply to the output. Can be"
706  "'none', 'mean' or 'sum'.",
707  refl::DefaultValue("mean"))
708  .def_ro("ignore_index", &NLLLossAttrs::ignore_index, "The target value to ignore.");
709  }
711 }; // struct NLLLossAttrs
712 
714 struct DropoutAttrs : public AttrsNodeReflAdapter<DropoutAttrs> {
715  double rate;
716 
717  static void RegisterReflection() {
718  namespace refl = tvm::ffi::reflection;
719  refl::ObjectDef<DropoutAttrs>().def_ro(
720  "rate", &DropoutAttrs::rate,
721  "Fraction of the input that gets dropped out during training time");
722  }
724 }; // struct DropoutAttrs
725 
727 struct AttentionAttrs : public AttrsNodeReflAdapter<AttentionAttrs> {
728  ffi::Optional<FloatImm> scale;
729  ffi::Optional<ffi::String> causal_mask;
730  ffi::Optional<IntImm> window_size;
731 
732  static void RegisterReflection() {
733  namespace refl = tvm::ffi::reflection;
734  refl::ObjectDef<AttentionAttrs>()
735  .def_ro(
736  "scale", &AttentionAttrs::scale,
737  "The custom scale applied before the softmax. The default value is 1 / sqrt(head_dim).")
738  .def_ro("causal_mask", &AttentionAttrs::causal_mask,
739  "The type of the causal mask, i.e. 'TopLeft' and 'BottomRight'.")
740  .def_ro("window_size", &AttentionAttrs::window_size,
741  "The size of the window for sliding-window attention.");
742  }
744 }; // struct AttentionAttrs
745 
747 struct PadAttrs : public AttrsNodeReflAdapter<PadAttrs> {
748  ffi::Array<Integer> pad_width;
749  double pad_value = 0.0;
750  tvm::ffi::String pad_mode;
751 
752  static void RegisterReflection() {
753  namespace refl = tvm::ffi::reflection;
754  refl::ObjectDef<PadAttrs>()
755  .def_ro("pad_width", &PadAttrs::pad_width,
756  "Number of values padded to the edges of each axis, "
757  "in the format of (before_1, after_1, ..., before_N, after_N)")
758  .def_ro("pad_value", &PadAttrs::pad_value, "The value to fill in padded area with",
759  refl::DefaultValue(0.0))
760  .def_ro("pad_mode", &PadAttrs::pad_mode,
761  "Padding type to use. \"constant\" pads with constant_value, "
762  "\"edge\" pads using the edge values of the input array, "
763  "\"reflect\" pads by reflecting values with respect to the edges.",
764  refl::DefaultValue("constant"));
765  }
767 };
768 
770 struct PixelShuffleAttrs : public AttrsNodeReflAdapter<PixelShuffleAttrs> {
772 
773  static void RegisterReflection() {
774  namespace refl = tvm::ffi::reflection;
775  refl::ObjectDef<PixelShuffleAttrs>().def_ro("upscale_factor",
777  "Scale factor for spatial upsampling.");
778  }
779  TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.PixelShuffleAttrs", PixelShuffleAttrs,
780  BaseAttrsNode);
781 };
782 
783 } // namespace relax
784 } // namespace tvm
785 
786 #endif // TVM_RELAX_ATTRS_NN_H_
Adapter for AttrsNode with the new reflection API.
Definition: attrs.h:384
Base class of all attribute class.
Definition: attrs.h:101
Runtime primitive data type.
Definition: data_type.h:47
Definition: repr_printer.h:91
An object that builds and maintains block scope and StmtSref mapping for Dependence analysis.
Definition: analyzer.h:37
Attributes for 1d adaptive pool operator.
Definition: nn.h:450
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.AdaptivePool1DAttrs", AdaptivePool1DAttrs, BaseAttrsNode)
ffi::String out_layout
Definition: nn.h:453
ffi::Optional< ffi::Array< int64_t > > output_size
Definition: nn.h:451
ffi::String layout
Definition: nn.h:452
static void RegisterReflection()
Definition: nn.h:455
Attributes for 2d adaptive pool operator.
Definition: nn.h:475
ffi::Optional< ffi::Array< int64_t > > output_size
Definition: nn.h:476
static void RegisterReflection()
Definition: nn.h:480
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.AdaptivePool2DAttrs", AdaptivePool2DAttrs, BaseAttrsNode)
ffi::String out_layout
Definition: nn.h:478
ffi::String layout
Definition: nn.h:477
Attributes for 3d adaptive pool operator.
Definition: nn.h:500
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.AdaptivePool3DAttrs", AdaptivePool3DAttrs, BaseAttrsNode)
ffi::Optional< ffi::Array< int64_t > > output_size
Definition: nn.h:501
static void RegisterReflection()
Definition: nn.h:505
ffi::String layout
Definition: nn.h:502
ffi::String out_layout
Definition: nn.h:503
Attributes used in Attention operator.
Definition: nn.h:727
ffi::Optional< IntImm > window_size
Definition: nn.h:730
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.AttentionAttrs", AttentionAttrs, BaseAttrsNode)
ffi::Optional< FloatImm > scale
Definition: nn.h:728
ffi::Optional< ffi::String > causal_mask
Definition: nn.h:729
static void RegisterReflection()
Definition: nn.h:732
Attributes used in batch_norm operator.
Definition: nn.h:577
bool training
Definition: nn.h:583
bool scale
Definition: nn.h:581
static void RegisterReflection()
Definition: nn.h:585
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.BatchNormAttrs", BatchNormAttrs, BaseAttrsNode)
double epsilon
Definition: nn.h:579
int axis
Definition: nn.h:578
double momentum
Definition: nn.h:582
bool center
Definition: nn.h:580
Attributes used in Conv1d operator.
Definition: nn.h:33
ffi::String out_layout
Definition: nn.h:40
ffi::Array< int64_t > strides
Definition: nn.h:34
ffi::Array< int64_t > dilation
Definition: nn.h:36
int groups
Definition: nn.h:37
ffi::String data_layout
Definition: nn.h:38
static void RegisterReflection()
Definition: nn.h:43
ffi::Array< int64_t > padding
Definition: nn.h:35
DataType out_dtype
Definition: nn.h:41
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Conv1DAttrs", Conv1DAttrs, BaseAttrsNode)
ffi::String kernel_layout
Definition: nn.h:39
Attributes used in Conv1DTranspose operator.
Definition: nn.h:171
ffi::String data_layout
Definition: nn.h:177
static void RegisterReflection()
Definition: nn.h:182
ffi::Array< int64_t > dilation
Definition: nn.h:175
DataType out_dtype
Definition: nn.h:180
ffi::Array< int64_t > output_padding
Definition: nn.h:174
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Conv1DTransposeAttrs", Conv1DTransposeAttrs, BaseAttrsNode)
ffi::String out_layout
Definition: nn.h:179
ffi::Array< int64_t > strides
Definition: nn.h:172
ffi::Array< int64_t > padding
Definition: nn.h:173
int groups
Definition: nn.h:176
ffi::String kernel_layout
Definition: nn.h:178
Attributes used in Conv2d operator.
Definition: nn.h:77
ffi::String out_layout
Definition: nn.h:84
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Conv2DAttrs", Conv2DAttrs, BaseAttrsNode)
ffi::Array< int64_t > strides
Definition: nn.h:78
ffi::Array< int64_t > dilation
Definition: nn.h:80
DataType out_dtype
Definition: nn.h:85
ffi::String kernel_layout
Definition: nn.h:83
static void RegisterReflection()
Definition: nn.h:87
int groups
Definition: nn.h:81
ffi::String data_layout
Definition: nn.h:82
ffi::Array< int64_t > padding
Definition: nn.h:79
Attributes used in Conv2dTranspose operator.
Definition: nn.h:220
ffi::String kernel_layout
Definition: nn.h:227
ffi::Array< int64_t > dilation
Definition: nn.h:224
ffi::String data_layout
Definition: nn.h:226
ffi::Array< int64_t > padding
Definition: nn.h:222
int groups
Definition: nn.h:225
ffi::Array< int64_t > strides
Definition: nn.h:221
ffi::Array< int64_t > output_padding
Definition: nn.h:223
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Conv2DTransposeAttrs", Conv2DTransposeAttrs, BaseAttrsNode)
ffi::String out_layout
Definition: nn.h:228
static void RegisterReflection()
Definition: nn.h:231
DataType out_dtype
Definition: nn.h:229
Attributes used in Conv3d operator.
Definition: nn.h:123
ffi::String kernel_layout
Definition: nn.h:129
ffi::String out_layout
Definition: nn.h:130
ffi::Array< int64_t > strides
Definition: nn.h:124
static void RegisterReflection()
Definition: nn.h:133
ffi::Array< int64_t > dilation
Definition: nn.h:126
DataType out_dtype
Definition: nn.h:131
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Conv3DAttrs", Conv3DAttrs, BaseAttrsNode)
ffi::String data_layout
Definition: nn.h:128
ffi::Array< int64_t > padding
Definition: nn.h:125
int groups
Definition: nn.h:127
Attributes used in Conv3dTranspose operator.
Definition: nn.h:271
ffi::String out_layout
Definition: nn.h:279
ffi::Array< int64_t > padding
Definition: nn.h:273
ffi::Array< int64_t > strides
Definition: nn.h:272
ffi::Array< int64_t > dilation
Definition: nn.h:275
int groups
Definition: nn.h:276
static void RegisterReflection()
Definition: nn.h:282
DataType out_dtype
Definition: nn.h:280
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Conv3DTransposeAttrs", Conv3DTransposeAttrs, BaseAttrsNode)
ffi::String data_layout
Definition: nn.h:277
ffi::String kernel_layout
Definition: nn.h:278
ffi::Array< int64_t > output_padding
Definition: nn.h:274
Attributes used in dropout operator.
Definition: nn.h:714
double rate
Definition: nn.h:715
static void RegisterReflection()
Definition: nn.h:717
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.DropoutAttrs", DropoutAttrs, BaseAttrsNode)
Attributes used in group_norm operator.
Definition: nn.h:626
ffi::Array< Integer > axes
Definition: nn.h:629
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.GroupNormAttrs", GroupNormAttrs, BaseAttrsNode)
int num_groups
Definition: nn.h:627
int channel_axis
Definition: nn.h:628
double epsilon
Definition: nn.h:630
bool center
Definition: nn.h:631
static void RegisterReflection()
Definition: nn.h:634
bool scale
Definition: nn.h:632
Attributes used in instance_norm operator.
Definition: nn.h:655
bool center
Definition: nn.h:659
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.InstanceNormAttrs", InstanceNormAttrs, BaseAttrsNode)
double epsilon
Definition: nn.h:658
bool scale
Definition: nn.h:660
ffi::Array< Integer > axes
Definition: nn.h:657
static void RegisterReflection()
Definition: nn.h:662
int channel_axis
Definition: nn.h:656
Attributes used in layer_norm operator.
Definition: nn.h:604
ffi::Array< Integer > axes
Definition: nn.h:605
bool scale
Definition: nn.h:608
static void RegisterReflection()
Definition: nn.h:610
bool center
Definition: nn.h:607
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.LayerNormAttrs", LayerNormAttrs, BaseAttrsNode)
double epsilon
Definition: nn.h:606
Attributes used in leaky_relu operator.
Definition: nn.h:537
static void RegisterReflection()
Definition: nn.h:540
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.LeakyReluAttrs", LeakyReluAttrs, BaseAttrsNode)
double alpha
Definition: nn.h:538
Attributes used in nll_loss operator.
Definition: nn.h:697
static void RegisterReflection()
Definition: nn.h:701
ffi::String reduction
Definition: nn.h:698
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.NLLLossAttrs", NLLLossAttrs, BaseAttrsNode)
int ignore_index
Definition: nn.h:699
Attributes used in PReLU operator.
Definition: nn.h:565
static void RegisterReflection()
Definition: nn.h:568
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.PReluAttrs", PReluAttrs, BaseAttrsNode)
int axis
Definition: nn.h:566
Attributes used for the padding operator.
Definition: nn.h:747
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.PadAttrs", PadAttrs, BaseAttrsNode)
ffi::Array< Integer > pad_width
Definition: nn.h:748
tvm::ffi::String pad_mode
Definition: nn.h:750
double pad_value
Definition: nn.h:749
static void RegisterReflection()
Definition: nn.h:752
Attributes used for the pixel shuffle operator.
Definition: nn.h:770
int upscale_factor
Definition: nn.h:771
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.PixelShuffleAttrs", PixelShuffleAttrs, BaseAttrsNode)
static void RegisterReflection()
Definition: nn.h:773
Attributes used in max_pool1d and avg_pool1d operator.
Definition: nn.h:323
ffi::Array< int64_t > strides
Definition: nn.h:325
ffi::Array< int64_t > dilation
Definition: nn.h:327
static void RegisterReflection()
Definition: nn.h:333
ffi::Array< int64_t > pool_size
Definition: nn.h:324
ffi::String layout
Definition: nn.h:330
ffi::Array< int64_t > padding
Definition: nn.h:326
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Pool1DAttrs", Pool1DAttrs, BaseAttrsNode)
bool count_include_pad
Definition: nn.h:329
ffi::String out_layout
Definition: nn.h:331
bool ceil_mode
Definition: nn.h:328
Attributes used in max_pool2d and avg_pool2d operator.
Definition: nn.h:364
ffi::Array< int64_t > pool_size
Definition: nn.h:365
static void RegisterReflection()
Definition: nn.h:374
bool count_include_pad
Definition: nn.h:370
ffi::Array< int64_t > dilation
Definition: nn.h:368
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Pool2DAttrs", Pool2DAttrs, BaseAttrsNode)
ffi::String out_layout
Definition: nn.h:372
ffi::Array< int64_t > padding
Definition: nn.h:367
bool ceil_mode
Definition: nn.h:369
ffi::Array< int64_t > strides
Definition: nn.h:366
ffi::String layout
Definition: nn.h:371
Attributes used in max_pool3d and avg_pool3d operator.
Definition: nn.h:407
ffi::Array< int64_t > strides
Definition: nn.h:409
bool ceil_mode
Definition: nn.h:412
ffi::String out_layout
Definition: nn.h:415
ffi::String layout
Definition: nn.h:414
static void RegisterReflection()
Definition: nn.h:417
bool count_include_pad
Definition: nn.h:413
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.Pool3DAttrs", Pool3DAttrs, BaseAttrsNode)
ffi::Array< int64_t > dilation
Definition: nn.h:411
ffi::Array< int64_t > padding
Definition: nn.h:410
ffi::Array< int64_t > pool_size
Definition: nn.h:408
Attributes used in rms_norm operator.
Definition: nn.h:681
ffi::Array< Integer > axes
Definition: nn.h:682
double epsilon
Definition: nn.h:683
static void RegisterReflection()
Definition: nn.h:685
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.RMSNormAttrs", RMSNormAttrs, BaseAttrsNode)
Attributes used in softmax operators.
Definition: nn.h:525
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.SoftmaxAttrs", SoftmaxAttrs, BaseAttrsNode)
int axis
Definition: nn.h:526
static void RegisterReflection()
Definition: nn.h:528
Attributes used in softplus operators.
Definition: nn.h:549
double threshold
Definition: nn.h:551
double beta
Definition: nn.h:550
TVM_FFI_DECLARE_OBJECT_INFO_FINAL("relax.attrs.SoftplusAttrs", SoftplusAttrs, BaseAttrsNode)
static void RegisterReflection()
Definition: nn.h:553