tvm/relax/attrs/nn.h: attribute structs for Relax neural-network operators.

#ifndef TVM_RELAX_ATTRS_NN_H_
#define TVM_RELAX_ATTRS_NN_H_
46 "If padding is non-zero, then the input is implicitly zero-padded"
47 "Padding support both symmetric and asymmetric as"
48 "one int : same padding used on both sides"
49 "two int : padding width in the order of (left, right)");
51 "Specifies the dilation rate to use for dilated convolution.");
53 "Number of groups to split the input into for grouped convolution. The number of input and "
54 "output channels should be divisible by the number of groups.");
57 "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
58 "'N', 'C', 'W' stands for batch, channel, width"
59 "dimensions respectively. Convolution is applied on the 'W' dimensions.");
62 "Dimension ordering of weight. Can be 'OIW', 'IOW', etc."
63 "'O', 'I', 'W' stands for num_filter, input_channel, and width"
64 "dimensions respectively.");
67 "Dimension ordering of output. Can be 'NCW', 'NWC', etc."
68 "'N', 'C', 'W' stands for batch, channel, and width"
69 "dimensions respectively. Default to be same as input layout.");
71 "Output data type, set to explicit type under mixed precision setting");
89 "If padding is non-zero, then the input is implicitly zero-padded"
90 "Padding support both symmetric and asymmetric as"
91 "one int : same padding used on all sides"
92 "two int : bottom, right will use same padding as top, left"
93 "four int : padding width in the order of (top, left, bottom, right)");
95 "Specifies the dilation rate to use for dilated convolution.");
97 "Number of groups to split the input into for grouped convolution. The number of input and "
98 "output channels should be divisible by the number of groups.");
101 "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
102 "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
103 "dimensions respectively. Convolution is applied on the 'H' and"
107 "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc."
108 "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
109 "dimensions respectively.");
112 "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc."
113 "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
114 "dimensions respectively. Default to be same as input layout.");
116 "Output data type, set to explicit type under mixed precision setting");
134 "If padding is non-zero, then the input is implicitly zero-padded"
135 "Padding support both symmetric and asymmetric as"
136 "one int : same padding used on all sides"
137 "two int : bottom, right will use same padding as top, left"
138 "four int : padding width in the order of (forward, back, top, left, bottom, right)");
140 "Specifies the dilation rate to use for dilated convolution.");
142 "Number of groups to split the input into for grouped convolution. The number of input and "
143 "output channels should be divisible by the number of groups.");
146 "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
147 "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
148 "dimensions respectively. Convolution is applied on the 'D', 'H', and"
152 "Dimension ordering of weight. Can be 'OIDHW', 'OIDHW16o16i', etc."
153 "'O', 'I', 'D', 'H', 'W' stands for num_filter, input_channel, depth, height, and width"
154 "dimensions respectively.");
157 "Dimension ordering of output. Can be 'NCDHW', 'NDHWC', etc."
158 "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
159 "dimensions respectively. Default to be same as input layout.");
161 "Output data type, set to explicit type under mixed precision setting");
180 "If padding is non-zero, then the input is implicitly zero-padded"
181 "Padding support both symmetric and asymmetric as"
182 "one int : same padding used on both sides"
183 "two int : padding width in the order of (left, right)");
186 "Specifies the dilation rate to use for dilated convolution.");
188 "Number of groups to split the input into for grouped convolution. The number of input and "
189 "output channels should be divisible by the number of groups.");
192 "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
193 "'N', 'C', 'W' stands for batch, channel, width"
194 "dimensions respectively. Convolution is applied on the 'W' dimensions.");
197 "Dimension ordering of weight. Can be 'OIW', 'IOW', etc."
198 "'O', 'I', 'W' stands for num_filter, input_channel, and width"
199 "dimensions respectively.");
202 "Dimension ordering of output. Can be 'NCW', 'NWC', etc."
203 "'N', 'C', 'W' stands for batch, channel, and width"
204 "dimensions respectively. Default to be same as input layout.");
206 "Output data type, set to explicit type under mixed precision setting");
225 "If padding is non-zero, then the input is implicitly zero-padded"
226 "Padding support both symmetric and asymmetric as"
227 "one int : same padding used on all sides"
228 "two int : bottom, right will use same padding as top, left"
229 "four int : padding width in the order of (top, left, bottom, right)");
232 "Specifies the dilation rate to use for dilated convolution.");
234 "Number of groups to split the input into for grouped convolution. The number of input and "
235 "output channels should be divisible by the number of groups.");
238 "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
239 "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
240 "dimensions respectively. Convolution is applied on the 'H' and"
244 "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc."
245 "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
246 "dimensions respectively.");
249 "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc."
250 "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
251 "dimensions respectively. Default to be same as input layout.");
253 "Output data type, set to explicit type under mixed precision setting");
273 "If padding is non-zero, then the input is implicitly zero-padded"
274 "Padding support both symmetric and asymmetric as"
275 "one int : same padding used on all sides"
276 "two int : padding width in the order of (left, right)");
278 "A boolean indicating if use ceil or floor to compute the output shape. By using ceil, "
279 "every element in the input tensor will be covered by a sliding window.");
281 .describe(
"When true, will include padding to compute the average");
283 "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
284 "'N', 'C', 'W' stands for batch, channel, and width"
285 "dimensions respectively. Pooling is applied on the 'W' dimensions.");
288 "Dimension ordering of output data. Can be 'NCW', 'NWC', etc."
289 "'N', 'C', 'W' stands for batch, channel, and width"
290 "dimensions respectively. Pooling is applied on the 'W' dimensions.");
310 "If padding is non-zero, then the input is implicitly zero-padded"
311 "Padding support both symmetric and asymmetric as"
312 "one int : same padding used on all sides"
313 "two int : bottom, right will use same padding as top, left"
314 "four int : padding width in the order of (top, left, bottom, right)");
316 "A boolean indicating if use ceil or floor to compute the output shape. By using ceil, "
317 "every element in the input tensor will be covered by a sliding window.");
319 .describe(
"When true, will include padding to compute the average");
321 "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
322 "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
323 "dimensions respectively. Pooling is applied on the 'H' and"
327 "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
328 "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
329 "dimensions respectively. Pooling is applied on the 'H' and"
350 "If padding is non-zero, then the input is implicitly zero-padded"
351 "Padding support both symmetric and asymmetric as"
352 "one int : same padding used on all sides"
353 "three int : back, bottom, right will use same padding as front, top, left"
354 "four int : padding width in the order of (front, top, left, back, bottom, right)");
356 "A boolean indicating if use ceil or floor to compute the output shape. By using ceil, "
357 "every element in the input tensor will be covered by a sliding window.");
359 .describe(
"When true, will include padding to compute the average");
361 "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
362 "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
363 "dimensions respectively. Pooling is applied on the 'D', 'H' and"
367 "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc."
368 "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
369 "dimensions respectively. Pooling is applied on the 'D', 'H' and"
383 "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
384 "'N', 'C', 'W' stands for batch, channel and width"
385 "dimensions respectively. Pooling is applied on the"
389 "Dimension ordering of output data. Can be 'NCW', 'NWC', etc."
390 "'N', 'C', 'W' stands for batch, channel and width"
391 "dimensions respectively. Pooling is applied on the"
405 "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
406 "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
407 "dimensions respectively. Pooling is applied on the 'H' and"
411 "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
412 "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
413 "dimensions respectively. Pooling is applied on the 'H' and"
427 "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
428 "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
429 "dimensions respectively. Pooling is applied on 'D', 'H' and"
433 "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc."
434 "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
435 "dimensions respectively. Pooling is applied on 'D', 'H' and"
470 "Indicating if the beta offset will be added to the normalized tensor.");
LayerNormAttrs (attributes used in the layer_norm operator):
  axes: The axes along which the normalization is applied.
  center: Indicates whether the beta offset will be added to the normalized tensor.
505 "The axes that along which the normalization is applied (excluding the channel axis).");
508 "Indicating if the beta offset will be added to the normalized tensor.");
RMSNormAttrs (attributes used in the rms_norm operator):
  axes: The axes along which the normalization is applied.
531 "The reduction method to apply to the output. Can be"
532 "'none', 'mean' or 'sum'.");
543 "Fraction of the input that gets dropped out during training time");
555 "The custom scale applied before the softmax. The default value is 1 / sqrt(head_dim).");
557 .describe(
"The type of the causal mask, i.e. 'TopLeft' and 'BottomRight'.");
569 "Number of values padded to the edges of each axis, "
570 "in the format of (before_1, after_1, ..., before_N, after_N)");
572 .set_default(
"constant")
574 "Padding type to use. \"constant\" pads with constant_value, "
575 "\"edge\" pads using the edge values of the input array, "
576 "\"reflect\" pads by reflecting values with respect to the edges.");
Struct index (field names and types as declared in this header):

AdaptivePool1DAttrs ("relax.attrs.AdaptivePool1DAttrs"): attributes for the 1d adaptive pool operator.
  Optional<Array<IntImm>> output_size; String layout; String out_layout.
AdaptivePool2DAttrs ("relax.attrs.AdaptivePool2DAttrs"): attributes for the 2d adaptive pool operator.
  Optional<Array<IntImm>> output_size; String layout; String out_layout.
AdaptivePool3DAttrs ("relax.attrs.AdaptivePool3DAttrs"): attributes for the 3d adaptive pool operator.
  Optional<Array<IntImm>> output_size; String layout; String out_layout.
AttentionAttrs ("relax.attrs.AttentionAttrs"): attributes used in the attention operator.
  Optional<FloatImm> scale; Optional<String> causal_mask; Optional<IntImm> window_size.
BatchNormAttrs ("relax.attrs.BatchNormAttrs"): attributes used in the batch_norm operator.
  int axis; double epsilon; bool center; bool scale; double momentum.
Conv1DAttrs ("relax.attrs.Conv1DAttrs"): attributes used in the conv1d operator.
  Array<IntImm> strides, padding, dilation; int groups; String data_layout, kernel_layout, out_layout; DataType out_dtype.
Conv1DTransposeAttrs ("relax.attrs.Conv1DTransposeAttrs"): attributes used in the conv1d_transpose operator.
  Array<IntImm> strides, padding, output_padding, dilation; int groups; String data_layout, kernel_layout, out_layout; DataType out_dtype.
Conv2DAttrs ("relax.attrs.Conv2DAttrs"): attributes used in the conv2d operator.
  Array<IntImm> strides, padding, dilation; int groups; String data_layout, kernel_layout, out_layout; DataType out_dtype.
Conv2DTransposeAttrs ("relax.attrs.Conv2DTransposeAttrs"): attributes used in the conv2d_transpose operator.
  Array<IntImm> strides, padding, output_padding, dilation; int groups; String data_layout, kernel_layout, out_layout; DataType out_dtype.
Conv3DAttrs ("relax.attrs.Conv3DAttrs"): attributes used in the conv3d operator.
  Array<IntImm> strides, padding, dilation; int groups; String data_layout, kernel_layout, out_layout; DataType out_dtype.
DropoutAttrs ("relax.attrs.DropoutAttrs"): attributes used in the dropout operator.
  double rate.
GroupNormAttrs ("relax.attrs.GroupNormAttrs"): attributes used in the group_norm operator.
  int num_groups; int channel_axis; Array<Integer> axes; double epsilon; bool center; bool scale.
LayerNormAttrs ("relax.attrs.LayerNormAttrs"): attributes used in the layer_norm operator.
  Array<Integer> axes; double epsilon; bool center; bool scale.
LeakyReluAttrs ("relax.attrs.LeakyReluAttrs"): attributes used in the leaky_relu operator.
  double alpha.
NLLLossAttrs ("relax.attrs.NLLLossAttrs"): attributes used in the nll_loss operator.
  String reduction; int ignore_index.
PadAttrs ("relay.attrs.PadAttrs"): attributes used for the padding operator.
  Array<Integer> pad_width; tvm::String pad_mode.
Pool1DAttrs ("relax.attrs.Pool1DAttrs"): attributes used in the max_pool1d and avg_pool1d operators.
  Array<IntImm> pool_size, strides, padding, dilation; bool ceil_mode; bool count_include_pad; String layout, out_layout.
Pool2DAttrs ("relax.attrs.Pool2DAttrs"): attributes used in the max_pool2d and avg_pool2d operators.
  Array<IntImm> pool_size, strides, padding, dilation; bool ceil_mode; bool count_include_pad; String layout, out_layout.
Pool3DAttrs ("relax.attrs.Pool3DAttrs"): attributes used in the max_pool3d and avg_pool3d operators.
  Array<IntImm> pool_size, strides, padding, dilation; bool ceil_mode; bool count_include_pad; String layout, out_layout.
RMSNormAttrs ("relax.attrs.RMSNormAttrs"): attributes used in the rms_norm operator.
  Array<Integer> axes; double epsilon.
SoftmaxAttrs ("relax.attrs.SoftmaxAttrs"): attributes used in softmax operators.
  int axis.