tvm
nn.h
Go to the documentation of this file.
1 /*
2  * Licensed to the Apache Software Foundation (ASF) under one
3  * or more contributor license agreements. See the NOTICE file
4  * distributed with this work for additional information
5  * regarding copyright ownership. The ASF licenses this file
6  * to you under the Apache License, Version 2.0 (the
7  * "License"); you may not use this file except in compliance
8  * with the License. You may obtain a copy of the License at
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing,
13  * software distributed under the License is distributed on an
14  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15  * KIND, either express or implied. See the License for the
16  * specific language governing permissions and limitations
17  * under the License.
18  */
19 
24 #ifndef TVM_RELAX_ATTRS_NN_H_
25 #define TVM_RELAX_ATTRS_NN_H_
26 
27 #include <tvm/relax/expr.h>
28 
29 namespace tvm {
30 namespace relax {
31 
33 struct Conv1DAttrs : public tvm::AttrsNode<Conv1DAttrs> {
37  int groups;
42 
43  TVM_DECLARE_ATTRS(Conv1DAttrs, "relax.attrs.Conv1DAttrs") {
44  TVM_ATTR_FIELD(strides).describe("Specifies the strides of the convolution.");
45  TVM_ATTR_FIELD(padding).describe(
46  "If padding is non-zero, then the input is implicitly zero-padded"
47  "Padding support both symmetric and asymmetric as"
48  "one int : same padding used on both sides"
49  "two int : padding width in the order of (left, right)");
50  TVM_ATTR_FIELD(dilation).describe(
51  "Specifies the dilation rate to use for dilated convolution.");
52  TVM_ATTR_FIELD(groups).describe(
53  "Number of groups to split the input into for grouped convolution. The number of input and "
54  "output channels should be divisible by the number of groups.");
56  .describe(
57  "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
58  "'N', 'C', 'W' stands for batch, channel, width"
59  "dimensions respectively. Convolution is applied on the 'W' dimensions.");
61  .describe(
62  "Dimension ordering of weight. Can be 'OIW', 'IOW', etc."
63  "'O', 'I', 'W' stands for num_filter, input_channel, and width"
64  "dimensions respectively.");
66  .describe(
67  "Dimension ordering of output. Can be 'NCW', 'NWC', etc."
68  "'N', 'C', 'W' stands for batch, channel, and width"
69  "dimensions respectively. Default to be same as input layout.");
70  TVM_ATTR_FIELD(out_dtype).describe(
71  "Output data type, set to explicit type under mixed precision setting");
72  }
73 }; // struct Conv1dAttrs
74 
76 struct Conv2DAttrs : public tvm::AttrsNode<Conv2DAttrs> {
80  int groups;
85 
86  TVM_DECLARE_ATTRS(Conv2DAttrs, "relax.attrs.Conv2DAttrs") {
87  TVM_ATTR_FIELD(strides).describe("Specifies the strides of the convolution.");
88  TVM_ATTR_FIELD(padding).describe(
89  "If padding is non-zero, then the input is implicitly zero-padded"
90  "Padding support both symmetric and asymmetric as"
91  "one int : same padding used on all sides"
92  "two int : bottom, right will use same padding as top, left"
93  "four int : padding width in the order of (top, left, bottom, right)");
94  TVM_ATTR_FIELD(dilation).describe(
95  "Specifies the dilation rate to use for dilated convolution.");
96  TVM_ATTR_FIELD(groups).describe(
97  "Number of groups to split the input into for grouped convolution. The number of input and "
98  "output channels should be divisible by the number of groups.");
100  .describe(
101  "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
102  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
103  "dimensions respectively. Convolution is applied on the 'H' and"
104  "'W' dimensions.");
106  .describe(
107  "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc."
108  "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
109  "dimensions respectively.");
111  .describe(
112  "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc."
113  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
114  "dimensions respectively. Default to be same as input layout.");
115  TVM_ATTR_FIELD(out_dtype).describe(
116  "Output data type, set to explicit type under mixed precision setting");
117  }
118 }; // struct Conv2dAttrs
119 
121 struct Conv3DAttrs : public tvm::AttrsNode<Conv3DAttrs> {
125  int groups;
130 
131  TVM_DECLARE_ATTRS(Conv3DAttrs, "relax.attrs.Conv3DAttrs") {
132  TVM_ATTR_FIELD(strides).describe("Specifies the strides of the convolution.");
133  TVM_ATTR_FIELD(padding).describe(
134  "If padding is non-zero, then the input is implicitly zero-padded"
135  "Padding support both symmetric and asymmetric as"
136  "one int : same padding used on all sides"
137  "two int : bottom, right will use same padding as top, left"
138  "four int : padding width in the order of (forward, back, top, left, bottom, right)");
139  TVM_ATTR_FIELD(dilation).describe(
140  "Specifies the dilation rate to use for dilated convolution.");
141  TVM_ATTR_FIELD(groups).describe(
142  "Number of groups to split the input into for grouped convolution. The number of input and "
143  "output channels should be divisible by the number of groups.");
145  .describe(
146  "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
147  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
148  "dimensions respectively. Convolution is applied on the 'D', 'H', and"
149  "'W' dimensions.");
151  .describe(
152  "Dimension ordering of weight. Can be 'OIDHW', 'OIDHW16o16i', etc."
153  "'O', 'I', 'D', 'H', 'W' stands for num_filter, input_channel, depth, height, and width"
154  "dimensions respectively.");
156  .describe(
157  "Dimension ordering of output. Can be 'NCDHW', 'NDHWC', etc."
158  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
159  "dimensions respectively. Default to be same as input layout.");
160  TVM_ATTR_FIELD(out_dtype).describe(
161  "Output data type, set to explicit type under mixed precision setting");
162  }
163 }; // struct Conv3dAttrs
164 
166 struct Conv1DTransposeAttrs : public tvm::AttrsNode<Conv1DTransposeAttrs> {
171  int groups;
176 
177  TVM_DECLARE_ATTRS(Conv1DTransposeAttrs, "relax.attrs.Conv1DTransposeAttrs") {
178  TVM_ATTR_FIELD(strides).describe("Specifies the strides of the convolution.");
179  TVM_ATTR_FIELD(padding).describe(
180  "If padding is non-zero, then the input is implicitly zero-padded"
181  "Padding support both symmetric and asymmetric as"
182  "one int : same padding used on both sides"
183  "two int : padding width in the order of (left, right)");
184  TVM_ATTR_FIELD(output_padding).describe("Used to disambiguate the output shape.");
185  TVM_ATTR_FIELD(dilation).describe(
186  "Specifies the dilation rate to use for dilated convolution.");
187  TVM_ATTR_FIELD(groups).describe(
188  "Number of groups to split the input into for grouped convolution. The number of input and "
189  "output channels should be divisible by the number of groups.");
191  .describe(
192  "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
193  "'N', 'C', 'W' stands for batch, channel, width"
194  "dimensions respectively. Convolution is applied on the 'W' dimensions.");
196  .describe(
197  "Dimension ordering of weight. Can be 'OIW', 'IOW', etc."
198  "'O', 'I', 'W' stands for num_filter, input_channel, and width"
199  "dimensions respectively.");
201  .describe(
202  "Dimension ordering of output. Can be 'NCW', 'NWC', etc."
203  "'N', 'C', 'W' stands for batch, channel, and width"
204  "dimensions respectively. Default to be same as input layout.");
205  TVM_ATTR_FIELD(out_dtype).describe(
206  "Output data type, set to explicit type under mixed precision setting");
207  }
208 }; // struct Conv1DTransposeAttrs
209 
211 struct Conv2DTransposeAttrs : public tvm::AttrsNode<Conv2DTransposeAttrs> {
216  int groups;
221 
222  TVM_DECLARE_ATTRS(Conv2DTransposeAttrs, "relax.attrs.Conv2DTransposeAttrs") {
223  TVM_ATTR_FIELD(strides).describe("Specifies the strides of the convolution.");
224  TVM_ATTR_FIELD(padding).describe(
225  "If padding is non-zero, then the input is implicitly zero-padded"
226  "Padding support both symmetric and asymmetric as"
227  "one int : same padding used on all sides"
228  "two int : bottom, right will use same padding as top, left"
229  "four int : padding width in the order of (top, left, bottom, right)");
230  TVM_ATTR_FIELD(output_padding).describe("Used to disambiguate the output shape.");
231  TVM_ATTR_FIELD(dilation).describe(
232  "Specifies the dilation rate to use for dilated convolution.");
233  TVM_ATTR_FIELD(groups).describe(
234  "Number of groups to split the input into for grouped convolution. The number of input and "
235  "output channels should be divisible by the number of groups.");
237  .describe(
238  "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
239  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
240  "dimensions respectively. Convolution is applied on the 'H' and"
241  "'W' dimensions.");
243  .describe(
244  "Dimension ordering of weight. Can be 'OIHW', 'OIHW16o16i', etc."
245  "'O', 'I', 'H', 'W' stands for num_filter, input_channel, height, and width"
246  "dimensions respectively.");
248  .describe(
249  "Dimension ordering of output. Can be 'NCHW', 'NHWC', etc."
250  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
251  "dimensions respectively. Default to be same as input layout.");
252  TVM_ATTR_FIELD(out_dtype).describe(
253  "Output data type, set to explicit type under mixed precision setting");
254  }
255 }; // struct Conv2DTransposeAttrs
256 
258 struct Pool1DAttrs : public tvm::AttrsNode<Pool1DAttrs> {
263  bool ceil_mode;
267 
268  TVM_DECLARE_ATTRS(Pool1DAttrs, "relax.attrs.Pool1DAttrs") {
269  TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
270  TVM_ATTR_FIELD(strides).describe("Specifies the strides of the convolution.");
271  TVM_ATTR_FIELD(dilation).describe("Specifies the dilation of the convolution.");
272  TVM_ATTR_FIELD(padding).describe(
273  "If padding is non-zero, then the input is implicitly zero-padded"
274  "Padding support both symmetric and asymmetric as"
275  "one int : same padding used on all sides"
276  "two int : padding width in the order of (left, right)");
277  TVM_ATTR_FIELD(ceil_mode).describe(
278  "A boolean indicating if use ceil or floor to compute the output shape. By using ceil, "
279  "every element in the input tensor will be covered by a sliding window.");
281  .describe("When true, will include padding to compute the average");
282  TVM_ATTR_FIELD(layout).set_default("NCW").describe(
283  "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
284  "'N', 'C', 'W' stands for batch, channel, and width"
285  "dimensions respectively. Pooling is applied on the 'W' dimensions.");
287  .describe(
288  "Dimension ordering of output data. Can be 'NCW', 'NWC', etc."
289  "'N', 'C', 'W' stands for batch, channel, and width"
290  "dimensions respectively. Pooling is applied on the 'W' dimensions.");
291  }
292 }; // struct Pool1dAttrs
293 
295 struct Pool2DAttrs : public tvm::AttrsNode<Pool2DAttrs> {
300  bool ceil_mode;
304 
305  TVM_DECLARE_ATTRS(Pool2DAttrs, "relax.attrs.Pool2DAttrs") {
306  TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
307  TVM_ATTR_FIELD(strides).describe("Specifies the strides of the convolution.");
308  TVM_ATTR_FIELD(dilation).describe("Specifies the dilation of the convolution.");
309  TVM_ATTR_FIELD(padding).describe(
310  "If padding is non-zero, then the input is implicitly zero-padded"
311  "Padding support both symmetric and asymmetric as"
312  "one int : same padding used on all sides"
313  "two int : bottom, right will use same padding as top, left"
314  "four int : padding width in the order of (top, left, bottom, right)");
315  TVM_ATTR_FIELD(ceil_mode).describe(
316  "A boolean indicating if use ceil or floor to compute the output shape. By using ceil, "
317  "every element in the input tensor will be covered by a sliding window.");
319  .describe("When true, will include padding to compute the average");
320  TVM_ATTR_FIELD(layout).describe(
321  "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
322  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
323  "dimensions respectively. Pooling is applied on the 'H' and"
324  "'W' dimensions.");
326  .describe(
327  "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
328  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
329  "dimensions respectively. Pooling is applied on the 'H' and"
330  "'W' dimensions.");
331  }
332 }; // struct Pool2dAttrs
333 
335 struct Pool3DAttrs : public tvm::AttrsNode<Pool3DAttrs> {
340  bool ceil_mode;
344 
345  TVM_DECLARE_ATTRS(Pool3DAttrs, "relax.attrs.Pool3DAttrs") {
346  TVM_ATTR_FIELD(pool_size).describe("Size of the pooling windows.");
347  TVM_ATTR_FIELD(strides).describe("Specifies the strides of the convolution.");
348  TVM_ATTR_FIELD(dilation).describe("Specifies the dilation of the convolution.");
349  TVM_ATTR_FIELD(padding).describe(
350  "If padding is non-zero, then the input is implicitly zero-padded"
351  "Padding support both symmetric and asymmetric as"
352  "one int : same padding used on all sides"
353  "three int : back, bottom, right will use same padding as front, top, left"
354  "four int : padding width in the order of (front, top, left, back, bottom, right)");
355  TVM_ATTR_FIELD(ceil_mode).describe(
356  "A boolean indicating if use ceil or floor to compute the output shape. By using ceil, "
357  "every element in the input tensor will be covered by a sliding window.");
359  .describe("When true, will include padding to compute the average");
360  TVM_ATTR_FIELD(layout).describe(
361  "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
362  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
363  "dimensions respectively. Pooling is applied on the 'D', 'H' and"
364  "'W' dimensions.");
366  .describe(
367  "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc."
368  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
369  "dimensions respectively. Pooling is applied on the 'D', 'H' and"
370  "'W' dimensions.");
371  }
372 }; // struct Pool3dAttrs
373 
375 struct AdaptivePool1DAttrs : public tvm::AttrsNode<AdaptivePool1DAttrs> {
379 
380  TVM_DECLARE_ATTRS(AdaptivePool1DAttrs, "relax.attrs.AdaptivePool1DAttrs") {
381  TVM_ATTR_FIELD(output_size).describe("Output width.");
382  TVM_ATTR_FIELD(layout).describe(
383  "Dimension ordering of input data. Can be 'NCW', 'NWC', etc."
384  "'N', 'C', 'W' stands for batch, channel and width"
385  "dimensions respectively. Pooling is applied on the"
386  "'W' dimensions.");
388  .describe(
389  "Dimension ordering of output data. Can be 'NCW', 'NWC', etc."
390  "'N', 'C', 'W' stands for batch, channel and width"
391  "dimensions respectively. Pooling is applied on the"
392  "'W' dimensions.");
393  }
394 }; // struct AdaptivePool1DAttrs
395 
397 struct AdaptivePool2DAttrs : public tvm::AttrsNode<AdaptivePool2DAttrs> {
401 
402  TVM_DECLARE_ATTRS(AdaptivePool2DAttrs, "relax.attrs.AdaptivePool2DAttrs") {
403  TVM_ATTR_FIELD(output_size).describe("Output height and width.");
404  TVM_ATTR_FIELD(layout).describe(
405  "Dimension ordering of input data. Can be 'NCHW', 'NHWC', etc."
406  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
407  "dimensions respectively. Pooling is applied on the 'H' and"
408  "'W' dimensions.");
410  .describe(
411  "Dimension ordering of output data. Can be 'NCHW', 'NHWC', etc."
412  "'N', 'C', 'H', 'W' stands for batch, channel, height, and width"
413  "dimensions respectively. Pooling is applied on the 'H' and"
414  "'W' dimensions.");
415  }
416 }; // struct AdaptivePool2DAttrs
417 
419 struct AdaptivePool3DAttrs : public tvm::AttrsNode<AdaptivePool3DAttrs> {
423 
424  TVM_DECLARE_ATTRS(AdaptivePool3DAttrs, "relax.attrs.AdaptivePool3DAttrs") {
425  TVM_ATTR_FIELD(output_size).describe("Output depth, height and width.");
426  TVM_ATTR_FIELD(layout).describe(
427  "Dimension ordering of input data. Can be 'NCDHW', 'NDHWC', etc."
428  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
429  "dimensions respectively. Pooling is applied on 'D', 'H' and"
430  "'W' dimensions.");
432  .describe(
433  "Dimension ordering of output data. Can be 'NCDHW', 'NDHWC', etc."
434  "'N', 'C', 'D', 'H', 'W' stands for batch, channel, depth, height, and width"
435  "dimensions respectively. Pooling is applied on 'D', 'H' and"
436  "'W' dimensions.");
437  }
438 }; // struct AdaptivePool3DAttrs
439 
441 struct SoftmaxAttrs : public tvm::AttrsNode<SoftmaxAttrs> {
442  int axis;
443 
444  TVM_DECLARE_ATTRS(SoftmaxAttrs, "relax.attrs.SoftmaxAttrs") {
445  TVM_ATTR_FIELD(axis).describe("The axis to sum over when computing softmax.");
446  }
447 };
448 
450 struct LeakyReluAttrs : public tvm::AttrsNode<LeakyReluAttrs> {
451  double alpha;
452 
453  TVM_DECLARE_ATTRS(LeakyReluAttrs, "relax.attrs.LeakyReluAttrs") {
454  TVM_ATTR_FIELD(alpha).describe("The slope of the negative part.");
455  }
456 };
457 
459 struct BatchNormAttrs : public tvm::AttrsNode<BatchNormAttrs> {
460  int axis;
461  double epsilon;
462  bool center;
463  bool scale;
464  double momentum;
465 
466  TVM_DECLARE_ATTRS(BatchNormAttrs, "relax.attrs.BatchNormAttrs") {
467  TVM_ATTR_FIELD(axis).describe("The axis along which the normalization is applied.");
468  TVM_ATTR_FIELD(epsilon).describe("Small float added to variance to avoid dividing by zero");
469  TVM_ATTR_FIELD(center).describe(
470  "Indicating if the beta offset will be added to the normalized tensor.");
471  TVM_ATTR_FIELD(scale).describe("Indicating if the gamma scale will be multiplied.");
472  TVM_ATTR_FIELD(momentum).describe("The value used for the moving_mean and moving_var update.");
473  }
474 }; // struct BatchNormAttrs
475 
477 struct LayerNormAttrs : public tvm::AttrsNode<LayerNormAttrs> {
479  double epsilon;
480  bool center;
481  bool scale;
482 
483  TVM_DECLARE_ATTRS(LayerNormAttrs, "relax.attrs.LayerNormAttrs") {
484  TVM_ATTR_FIELD(axes).describe("The axes that along which the normalization is applied.");
485  TVM_ATTR_FIELD(epsilon).describe("Small float added to variance to avoid dividing by zero");
486  TVM_ATTR_FIELD(center).describe(
487  "Indicating if the beta offset will be added to the normalized tensor.");
488  TVM_ATTR_FIELD(scale).describe("Indicating if the gamma scale will be multiplied.");
489  }
490 }; // struct LayerNormAttrs
491 
493 struct GroupNormAttrs : public tvm::AttrsNode<GroupNormAttrs> {
497  double epsilon;
498  bool center;
499  bool scale;
500 
501  TVM_DECLARE_ATTRS(GroupNormAttrs, "relax.attrs.GroupNormAttrs") {
502  TVM_ATTR_FIELD(num_groups).describe("The number of groups to separate the channels into.");
503  TVM_ATTR_FIELD(channel_axis).describe("The axis that represents the channel.");
504  TVM_ATTR_FIELD(axes).describe(
505  "The axes that along which the normalization is applied (excluding the channel axis).");
506  TVM_ATTR_FIELD(epsilon).describe("Small float added to variance to avoid dividing by zero");
507  TVM_ATTR_FIELD(center).describe(
508  "Indicating if the beta offset will be added to the normalized tensor.");
509  TVM_ATTR_FIELD(scale).describe("Indicating if the gamma scale will be multiplied.");
510  }
511 }; // struct GroupNormAttrs
512 
514 struct RMSNormAttrs : public tvm::AttrsNode<RMSNormAttrs> {
516  double epsilon;
517 
518  TVM_DECLARE_ATTRS(RMSNormAttrs, "relax.attrs.RMSNormAttrs") {
519  TVM_ATTR_FIELD(axes).describe("The axes that along which the normalization is applied.");
520  TVM_ATTR_FIELD(epsilon).describe("Small float added to variance to avoid dividing by zero");
521  }
522 }; // struct RMSNormAttrs
523 
525 struct NLLLossAttrs : public tvm::AttrsNode<NLLLossAttrs> {
528 
529  TVM_DECLARE_ATTRS(NLLLossAttrs, "relax.attrs.NLLLossAttrs") {
530  TVM_ATTR_FIELD(reduction).set_default("mean").describe(
531  "The reduction method to apply to the output. Can be"
532  "'none', 'mean' or 'sum'.");
533  TVM_ATTR_FIELD(ignore_index).describe("The target value to ignore.");
534  }
535 }; // struct NLLLossAttrs
536 
538 struct DropoutAttrs : public tvm::AttrsNode<DropoutAttrs> {
539  double rate;
540 
541  TVM_DECLARE_ATTRS(DropoutAttrs, "relax.attrs.DropoutAttrs") {
542  TVM_ATTR_FIELD(rate).describe(
543  "Fraction of the input that gets dropped out during training time");
544  }
545 }; // struct DropoutAttrs
546 
548 struct AttentionAttrs : public tvm::AttrsNode<AttentionAttrs> {
552 
553  TVM_DECLARE_ATTRS(AttentionAttrs, "relax.attrs.AttentionAttrs") {
554  TVM_ATTR_FIELD(scale).describe(
555  "The custom scale applied before the softmax. The default value is 1 / sqrt(head_dim).");
557  .describe("The type of the causal mask, i.e. 'TopLeft' and 'BottomRight'.");
558  TVM_ATTR_FIELD(window_size).describe("The size of the window for sliding-window attention.");
559  }
560 }; // struct AttentionAttrs
561 
563 struct PadAttrs : public tvm::AttrsNode<PadAttrs> {
566 
567  TVM_DECLARE_ATTRS(PadAttrs, "relay.attrs.PadAttrs") {
568  TVM_ATTR_FIELD(pad_width).describe(
569  "Number of values padded to the edges of each axis, "
570  "in the format of (before_1, after_1, ..., before_N, after_N)");
572  .set_default("constant")
573  .describe(
574  "Padding type to use. \"constant\" pads with constant_value, "
575  "\"edge\" pads using the edge values of the input array, "
576  "\"reflect\" pads by reflecting values with respect to the edges.");
577  }
578 };
579 
580 } // namespace relax
581 } // namespace tvm
582 
583 #endif // TVM_RELAX_ATTRS_NN_H_
The base class of all attribute nodes; uses the "curiously recurring template pattern".
Definition: attrs.h:870
Array, container representing a contiguous sequence of ObjectRefs.
Definition: array.h:289
Runtime primitive data type.
Definition: data_type.h:43
Optional container that represents a nullable variant of T.
Definition: optional.h:51
Reference to string objects.
Definition: string.h:98
#define TVM_ATTR_FIELD(FieldName)
Declare an attribute field.
Definition: attrs.h:76
runtime implementation for LibTorch/TorchScript.
Definition: analyzer.h:36
Attributes for 1d adaptive pool operator.
Definition: nn.h:375
Optional< Array< IntImm > > output_size
Definition: nn.h:376
TVM_DECLARE_ATTRS(AdaptivePool1DAttrs, "relax.attrs.AdaptivePool1DAttrs")
Definition: nn.h:380
String layout
Definition: nn.h:377
String out_layout
Definition: nn.h:378
Attributes for 2d adaptive pool operator.
Definition: nn.h:397
TVM_DECLARE_ATTRS(AdaptivePool2DAttrs, "relax.attrs.AdaptivePool2DAttrs")
Definition: nn.h:402
Optional< Array< IntImm > > output_size
Definition: nn.h:398
String layout
Definition: nn.h:399
String out_layout
Definition: nn.h:400
Attributes for 3d adaptive pool operator.
Definition: nn.h:419
TVM_DECLARE_ATTRS(AdaptivePool3DAttrs, "relax.attrs.AdaptivePool3DAttrs")
Definition: nn.h:424
String layout
Definition: nn.h:421
String out_layout
Definition: nn.h:422
Optional< Array< IntImm > > output_size
Definition: nn.h:420
Attributes used in Attention operator.
Definition: nn.h:548
Optional< String > causal_mask
Definition: nn.h:550
TVM_DECLARE_ATTRS(AttentionAttrs, "relax.attrs.AttentionAttrs")
Definition: nn.h:553
Optional< FloatImm > scale
Definition: nn.h:549
Optional< IntImm > window_size
Definition: nn.h:551
Attributes used in batch_norm operator.
Definition: nn.h:459
TVM_DECLARE_ATTRS(BatchNormAttrs, "relax.attrs.BatchNormAttrs")
Definition: nn.h:466
bool scale
Definition: nn.h:463
double epsilon
Definition: nn.h:461
int axis
Definition: nn.h:460
double momentum
Definition: nn.h:464
bool center
Definition: nn.h:462
Attributes used in Conv1d operator.
Definition: nn.h:33
Array< IntImm > dilation
Definition: nn.h:36
String out_layout
Definition: nn.h:40
int groups
Definition: nn.h:37
Array< IntImm > padding
Definition: nn.h:35
Array< IntImm > strides
Definition: nn.h:34
String data_layout
Definition: nn.h:38
DataType out_dtype
Definition: nn.h:41
String kernel_layout
Definition: nn.h:39
TVM_DECLARE_ATTRS(Conv1DAttrs, "relax.attrs.Conv1DAttrs")
Definition: nn.h:43
Attributes used in Conv1DTranspose operator.
Definition: nn.h:166
Array< IntImm > output_padding
Definition: nn.h:169
String data_layout
Definition: nn.h:172
Array< IntImm > dilation
Definition: nn.h:170
Array< IntImm > strides
Definition: nn.h:167
DataType out_dtype
Definition: nn.h:175
TVM_DECLARE_ATTRS(Conv1DTransposeAttrs, "relax.attrs.Conv1DTransposeAttrs")
Definition: nn.h:177
String out_layout
Definition: nn.h:174
Array< IntImm > padding
Definition: nn.h:168
String kernel_layout
Definition: nn.h:173
int groups
Definition: nn.h:171
Attributes used in Conv2d operator.
Definition: nn.h:76
TVM_DECLARE_ATTRS(Conv2DAttrs, "relax.attrs.Conv2DAttrs")
Definition: nn.h:86
String kernel_layout
Definition: nn.h:82
DataType out_dtype
Definition: nn.h:84
String data_layout
Definition: nn.h:81
int groups
Definition: nn.h:80
Array< IntImm > strides
Definition: nn.h:77
Array< IntImm > dilation
Definition: nn.h:79
Array< IntImm > padding
Definition: nn.h:78
String out_layout
Definition: nn.h:83
Attributes used in Conv2d operator.
Definition: nn.h:211
Array< IntImm > dilation
Definition: nn.h:215
Array< IntImm > output_padding
Definition: nn.h:214
Array< IntImm > padding
Definition: nn.h:213
Array< IntImm > strides
Definition: nn.h:212
TVM_DECLARE_ATTRS(Conv2DTransposeAttrs, "relax.attrs.Conv2DTransposeAttrs")
Definition: nn.h:222
String kernel_layout
Definition: nn.h:218
int groups
Definition: nn.h:216
String data_layout
Definition: nn.h:217
String out_layout
Definition: nn.h:219
DataType out_dtype
Definition: nn.h:220
Attributes used in Conv3d operator.
Definition: nn.h:121
String out_layout
Definition: nn.h:128
TVM_DECLARE_ATTRS(Conv3DAttrs, "relax.attrs.Conv3DAttrs")
Definition: nn.h:131
Array< IntImm > dilation
Definition: nn.h:124
String data_layout
Definition: nn.h:126
Array< IntImm > strides
Definition: nn.h:122
DataType out_dtype
Definition: nn.h:129
Array< IntImm > padding
Definition: nn.h:123
String kernel_layout
Definition: nn.h:127
int groups
Definition: nn.h:125
Attributes used in dropout operator.
Definition: nn.h:538
double rate
Definition: nn.h:539
TVM_DECLARE_ATTRS(DropoutAttrs, "relax.attrs.DropoutAttrs")
Definition: nn.h:541
Attributes used in group_norm operator.
Definition: nn.h:493
int num_groups
Definition: nn.h:494
int channel_axis
Definition: nn.h:495
double epsilon
Definition: nn.h:497
TVM_DECLARE_ATTRS(GroupNormAttrs, "relax.attrs.GroupNormAttrs")
Definition: nn.h:501
Array< Integer > axes
Definition: nn.h:496
bool center
Definition: nn.h:498
bool scale
Definition: nn.h:499
Attributes used in layer_norm operator.
Definition: nn.h:477
bool scale
Definition: nn.h:481
TVM_DECLARE_ATTRS(LayerNormAttrs, "relax.attrs.LayerNormAttrs")
Definition: nn.h:483
bool center
Definition: nn.h:480
Array< Integer > axes
Definition: nn.h:478
double epsilon
Definition: nn.h:479
Attributes used in softmax operators.
Definition: nn.h:450
TVM_DECLARE_ATTRS(LeakyReluAttrs, "relax.attrs.LeakyReluAttrs")
Definition: nn.h:453
double alpha
Definition: nn.h:451
Attributes used in nll_loss operator.
Definition: nn.h:525
int ignore_index
Definition: nn.h:527
TVM_DECLARE_ATTRS(NLLLossAttrs, "relax.attrs.NLLLossAttrs")
Definition: nn.h:529
String reduction
Definition: nn.h:526
Attributes used for the padding operator.
Definition: nn.h:563
TVM_DECLARE_ATTRS(PadAttrs, "relay.attrs.PadAttrs")
Definition: nn.h:567
tvm::String pad_mode
Definition: nn.h:565
Array< Integer > pad_width
Definition: nn.h:564
Attributes used in max_pool1d and avg_pool1d operator.
Definition: nn.h:258
Array< IntImm > padding
Definition: nn.h:261
TVM_DECLARE_ATTRS(Pool1DAttrs, "relax.attrs.Pool1DAttrs")
Definition: nn.h:268
bool count_include_pad
Definition: nn.h:264
String layout
Definition: nn.h:265
Array< IntImm > strides
Definition: nn.h:260
Array< IntImm > dilation
Definition: nn.h:262
bool ceil_mode
Definition: nn.h:263
Array< IntImm > pool_size
Definition: nn.h:259
String out_layout
Definition: nn.h:266
Attributes used in max_pool2d and avg_pool2d operator.
Definition: nn.h:295
Array< IntImm > padding
Definition: nn.h:298
String layout
Definition: nn.h:302
bool count_include_pad
Definition: nn.h:301
String out_layout
Definition: nn.h:303
Array< IntImm > dilation
Definition: nn.h:299
bool ceil_mode
Definition: nn.h:300
Array< IntImm > strides
Definition: nn.h:297
Array< IntImm > pool_size
Definition: nn.h:296
TVM_DECLARE_ATTRS(Pool2DAttrs, "relax.attrs.Pool2DAttrs")
Definition: nn.h:305
Attributes used in max_pool3d and avg_pool3d operator.
Definition: nn.h:335
bool ceil_mode
Definition: nn.h:340
Array< IntImm > dilation
Definition: nn.h:339
String layout
Definition: nn.h:342
bool count_include_pad
Definition: nn.h:341
String out_layout
Definition: nn.h:343
Array< IntImm > pool_size
Definition: nn.h:336
Array< IntImm > strides
Definition: nn.h:337
TVM_DECLARE_ATTRS(Pool3DAttrs, "relax.attrs.Pool3DAttrs")
Definition: nn.h:345
Array< IntImm > padding
Definition: nn.h:338
Attributes used in rms_norm operator.
Definition: nn.h:514
TVM_DECLARE_ATTRS(RMSNormAttrs, "relax.attrs.RMSNormAttrs")
Definition: nn.h:518
double epsilon
Definition: nn.h:516
Array< Integer > axes
Definition: nn.h:515
Attributes used in softmax operators.
Definition: nn.h:441
int axis
Definition: nn.h:442
TVM_DECLARE_ATTRS(SoftmaxAttrs, "relax.attrs.SoftmaxAttrs")
Definition: nn.h:444