/**
 * Copyright 2019-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
include "ops_types.fbs";

namespace mindspore.schema;

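// PrimitiveType is the union of the per-operator attribute tables defined in the rest of this
// file; a value of this union holds exactly one member table. Declaration order determines the
// FlatBuffers union tag of each member, which is presumably why new operators are appended at
// the end instead of being inserted alphabetically.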
union PrimitiveType {
    Abs,
    Activation,
    ActivationGrad,
    Adam,
    AddFusion,
    AdderFusion,
    AddGrad,
    AddN,
    All,
    ApplyMomentum,
    ArgMaxFusion,
    ArgMinFusion,
    Assert,
    Assign,
    AssignAdd,
    AudioSpectrogram,
    AvgPoolFusion,
    AvgPoolGrad,
    BatchNorm,
    BatchNormGrad,
    BatchToSpace,
    BatchToSpaceND,
    BiasAdd,
    BinaryCrossEntropy,
    BinaryCrossEntropyGrad,
    BiasAddGrad,
    BroadcastTo,
    Cast,
    Ceil,
    Clip,
    Concat,
    Attention,
    Conv2DBackpropFilterFusion,
    Conv2DBackpropInputFusion,
    Conv2DFusion,
    Conv2dTransposeFusion,
    Cos,
    ConstantOfShape,
    Crop,
    CustomExtractFeatures,
    CustomNormalize,
    CustomPredict,
    DeConv2DGradFilter,
    Depend,
    DepthToSpace,
    DetectionPostProcess,
    DivFusion,
    DivGrad,
    Dropout,
    DropoutGrad,
    Elu,
    Eltwise,
    Equal,
    EmbeddingLookupFusion,
    ExpFusion,
    ExpandDims,
    FakeQuantWithMinMaxVars,
    FakeQuantWithMinMaxVarsPerChannel,
    FftReal,
    FftImag,
    Flatten,
    FlattenGrad,
    Floor,
    FloorDiv,
    FloorMod,
    Fill,
    FullConnection,
    FusedBatchNorm,
    Gather,
    GatherNd,
    Greater,
    GreaterEqual,
    HashtableLookup,
    InstanceNorm,
    LayerNormFusion,
    LeakyRelu,
    Less,
    LessEqual,
    Log,
    LogGrad,
    LogicalAnd,
    LogicalNot,
    LogicalOr,
    LpNormalization,
    LRN,
    LshProjection,
    LSTM,
    L2NormalizeFusion,
    MatMulFusion,
    Maximum,
    MaximumGrad,
    MaxPoolFusion,
    MaxPoolGrad,
    SwitchLayer,
    Mfcc,
    Minimum,
    MinimumGrad,
    Mod,
    MulFusion,
    MulGrad,
    Neg,
    NegGrad,
    NotEqual,
    NonMaxSuppression,
    OneHot,
    OnesLike,
    PadFusion,
    PartialFusion,
    PowerGrad,
    PowFusion,
    PriorBox,
    PReLUFusion,
    QuantDTypeCast,
    Rank,
    Range,
    Reciprocal,
    RealDiv,
    ReduceFusion,
    Reshape,
    Resize,
    ReverseSequence,
    ReverseV2,
    Rfft,
    ROIPooling,
    Round,
    Rsqrt,
    ScaleFusion,
    ScatterNd,
    SGD,
    Shape,
    SigmoidCrossEntropyWithLogits,
    SigmoidCrossEntropyWithLogitsGrad,
    Sin,
    SkipGram,
    SliceFusion,
    SmoothL1Loss,
    SmoothL1LossGrad,
    Softmax,
    SoftmaxCrossEntropyWithLogits,
    SpaceToBatch,
    SpaceToBatchND,
    SpaceToDepth,
    SparseSoftmaxCrossEntropyWithLogits,
    SparseToDense,
    Split,
    Sqrt,
    Squeeze,
    Square,
    SquaredDifference,
    Stack,
    StridedSlice,
    SubFusion,
    SubGrad,
    Switch,
    TensorListFromTensor,
    TensorListGetItem,
    TensorListReserve,
    TensorListSetItem,
    TensorListStack,
    TileFusion,
    TopKFusion,
    Transpose,
    Unique,
    UnsortedSegmentSum,
    Unsqueeze,
    Unstack,
    LSTMGrad,
    Where,
    ZerosLike,
    Select,
    ScatterNdUpdate,
    GRU,
    NonZero,
    InvertPermutation,
    Size,
    RandomStandardNormal,
    CropAndResize,
    Erf,
    StridedSliceGrad,
    IsFinite,
    LinSpace,
    UniformReal,
    AbsGrad,
    RsqrtGrad,
    SqrtGrad,
    LayerNormGrad,
    ResizeGrad,
    Splice,
    LogSoftmax,
    Call,
    Custom,
    CumSum,
    SplitWithOverlap,
    GenOP,
    RaggedRange,
    GLU,
    TensorArray,
    TensorArrayRead,
    TensorArrayWrite,
    Affine,
    AllGather,
    ReduceScatter,
    DynamicQuant,
    LSTMGradData,
    LSTMGradWeight,
    RandomNormal,
    NLLLoss,
    NLLLossGrad,
    FormatTranspose,
    GatherD,
    GroupNormFusion,
    Log1p,
    TensorScatterAdd,
    SparseFillEmptyRows,
    SparseReshape,
    SparseSegmentSum,
    ScatterElements,
    Triu,
    Tril,
    AdamWeightDecay,
    FillV2,
}

table Abs {
}

table Activation {
    activation_type: ActivationType = 0;
    alpha: float;
    min_val: float;
    max_val: float;
    approximate: bool = false;
}

table ActivationGrad {
    activation_type: ActivationType;
    alpha: float;
}

table Adam {
    use_locking: bool;
    use_nesterov: bool;
}

table AddFusion {
    activation_type: ActivationType = 0;
}

table AdderFusion {
    format: Format = 0;
    kernel_size: [long];
    stride: [long];
    dilation: [long];
    pad_mode: PadMode;
    pad_list: [long];
    group: long;
    in_channel: long;
    out_channel: long;
    activation_type: ActivationType = 0;
}

table AddGrad {
}

table AddN {
}

table All {
    keep_dims: long;
}

table ApplyMomentum {
    use_nesterov: bool;
    use_locking: bool;
    gradient_scale: float;
}

table ArgMaxFusion {
    axis: long;
    top_k: long = 1;
    keep_dims: bool;
    out_max_value: bool;
}

table ArgMinFusion {
    axis: long;
    top_k: long;
    keep_dims: bool;
    out_max_value: bool;
}

table Assert {
    summarize: long;
}

table Assign {
}

table AssignAdd {
}

table AudioSpectrogram {
    window_size: long;
    stride: long;
    mag_square: bool;
}

table AvgPoolFusion {
    kernel_size: [long];
    strides: [long];
    pad: [long];
    pad_mode: PadMode;
    round_mode: RoundMode;
    format: Format;
    global: bool;
    activation_type: ActivationType = 0;
}

table AvgPoolGrad {
    kernel_size: [long];
    strides: [long];
    pad_mode: PadMode;
    format: Format;
}

table BatchNorm {
    epsilon: float;
    format: Format;
    is_training: bool;
}

table BatchNormGrad {
    epsilon: float;
    is_training: bool;
}

table BatchToSpace {
    block_size: [long];
    crops: Vec2D;
}

table BatchToSpaceND {
    block_shape: [long];
    crops: Vec2D;
}

table BiasAdd {
    format: Format;
}

table BinaryCrossEntropy {
    reduction: Reduction;
}

table BinaryCrossEntropyGrad {
    reduction: Reduction = 1;
}

table BiasAddGrad {
}

table BroadcastTo {
    shape: [long];
}

table Cast {
}

table Ceil {
}

table Clip {
    max: float;
    min: float;
}

table Concat {
    axis: long;
}

table Attention {
    head_num: long;
    head_size: long;
    cross: bool;
    scale: float;
}

table Conv2DBackpropFilterFusion {
    format: Format = 0;
    kernel_size: [long];
    stride: [long];
    dilation: [long];
    pad_mode: PadMode;
    pad_list: [long];
    mode: long;
    group: long;
    in_channel: long;
    out_channel: long;
    activation_type: ActivationType = 0;
}

table Conv2DBackpropInputFusion {
    format: Format = 0;
    kernel_size: [long];
    stride: [long];
    dilation: [long];
    pad_mode: PadMode;
    pad: [long];
    pad_list: [long];
    mode: long;
    group: long;
    in_channel: long;
    out_channel: long;
    activation_type: ActivationType = 0;
}

table Conv2DFusion {
    format: Format = 0;
    kernel_size: [long];
    stride: [long];
    dilation: [long];
    pad_mode: PadMode;
    pad_list: [long];
    mode: long;
    group: long;
    in_channel: long;
    out_channel: long;
    activation_type: ActivationType = 0;
}

table Conv2dTransposeFusion {
    format: Format = 0;
    kernel_size: [long];
    stride: [long];
    dilation: [long];
    pad_mode: PadMode;
    pad: [long];
    pad_list: [long];
    mode: long;
    group: long;
    in_channel: long;
    out_channel: long;
    activation_type: ActivationType = 0;
    output_paddings: [long];
}

table Cos {
}

table ConstantOfShape {
    data_type: long;
    value: [float];
}

table Crop {
    axis: long;
    offsets: [long];
}

table CustomExtractFeatures {
}

table CustomNormalize {
}

table CustomPredict {
    output_num: long;
    weight_threshold: float;
}

table DeConv2DGradFilter {
    in_channel: long;
    out_channel: long;
    kernel_size: [long];
    pad_mode: PadMode;
    pad_list: [long];
    stride: [long];
    dilation: [long];
    group: long;
    format: Format;
    activation_type: ActivationType;
}

table Depend {
}

table DepthToSpace {
    block_size: long;
    format: Format = 0;
    mode: string;
}

table DetectionPostProcess {
    format: Format = 0;
    input_size: long;
    scale: [float];
    nms_iou_threshold: float;
    nms_score_threshold: float;
    max_detections: long;
    detections_per_class: long;
    max_classes_per_detection: long;
    num_classes: long;
    use_regular_nms: bool;
    out_quantized: bool;
}

table DivFusion {
    activation_type: ActivationType = 0;
}

table DivGrad {
}

table Dropout {
    keep_prob: float = 0.5;
}

table DropoutGrad {
    keep_prob: float;
}

table Elu {
    alpha: float;
}

table Eltwise {
    mode: EltwiseMode;
}

table Equal {
}

table EmbeddingLookupFusion {
    max_norm: float;
}

table ExpFusion {
    base: float = -1;
    scale: float = 1.0;
    shift: float = 0.0;
}

table ExpandDims {
}

table FakeQuantWithMinMaxVars {
    num_bits: long;
    narrow_range: bool;
}

table FakeQuantWithMinMaxVarsPerChannel {
    num_bits: long;
    narrow_range: bool;
}

table FftReal {
}

table FftImag {
}

table Flatten {
    axis: long = 1;
}

table FlattenGrad {
}

table Floor {
}

table FloorDiv {
}

table FloorMod {
}

table Fill {
}

table FullConnection {
    has_bias: bool;
    use_axis: bool;
    axis: long;
    activation_type: ActivationType = 0;
}

table FusedBatchNorm {
    epsilon: float = 0.0001;
    momentum: float = 0.9;
    mode: long;
}

table Gather {
}

table GatherNd {
}

table Greater {
}

table GreaterEqual {
}

table HashtableLookup {
}

table InstanceNorm {
    epsilon: float;
}

table LayerNormFusion {
    begin_norm_axis: long;
    epsilon: float = 0.00001;
    elementwise_affine: bool;
    begin_params_axis: long;
}

table LeakyRelu {
    negative_slope: float;
}

table Less {
}

table LessEqual {
}

table Log {
}

table LogGrad {
}

table LogicalAnd {
}

table LogicalNot {
}

table LogicalOr {
}

table LpNormalization {
    axis: long;
    p: long;
}

table LRN {
    depth_radius: long;
    bias: float;
    alpha: float;
    beta: float;
    norm_region: string;
}

table LshProjection {
    type: LshProjectionType;
}

table LSTM {
    bidirectional: bool;
    has_bias: bool;
    input_size: long;
    hidden_size: long;
    num_layers: long;
    num_directions: long;
    dropout: float;
    zoneout_cell: float = 0;
    zoneout_hidden: float = 0;
    proj_size: long = 0;
}

table LSTMGrad {
    bidirectional: bool;
    has_bias: bool;
    input_size: long;
    hidden_size: long;
    num_layers: long;
    num_directions: long;
    dropout: float;
    zoneout_cell: float = 0;
    zoneout_hidden: float = 0;
}

table L2NormalizeFusion {
    axis: [long];
    epsilon: float;
    activation_type: ActivationType = 0;
}

table MatMulFusion {
    transpose_a: bool = false;
    transpose_b: bool = false;
    activation_type: ActivationType = 0;
}

table Maximum {
}

table MaximumGrad {
    grad_x: bool;
    grad_y: bool;
}

table MaxPoolFusion {
    kernel_size: [long];
    strides: [long];
    pad: [long];
    pad_mode: PadMode;
    round_mode: RoundMode;
    format: Format;
    global: bool;
    activation_type: ActivationType = 0;
}

table MaxPoolGrad {
    kernel_size: [long];
    strides: [long];
    pad_mode: PadMode;
    format: Format;
}

table SwitchLayer {
}

table Mfcc {
    freq_upper_limit: float;
    freq_lower_limit: float;
    filter_bank_channel_num: long;
    dct_coeff_num: long;
}

table Minimum {
}

table MinimumGrad {
    grad_x: bool;
    grad_y: bool;
}

table Mod {
}

table MulFusion {
    activation_type: ActivationType = 0;
}

table MulGrad {
}

table Neg {
}

table NegGrad {
}

table NotEqual {
}

table NonMaxSuppression {
    center_point_box: long;
}

table OneHot {
    axis: long;
}

table OnesLike {
}

table PadFusion {
    paddings: Vec2D;
    padding_mode: PaddingMode;
    constant_value: float;
}

table PartialFusion {
    sub_graph_index: long;
}

table PowerGrad {
    power: float;
    scale: float;
    shift: float;
}

table PowFusion {
    scale: float = 1;
    shift: float = 0;
}

table PriorBox {
    min_sizes: [long];
    max_sizes: [long];
    aspect_ratios: [float];
    variances: [float];
    image_size_w: long;
    image_size_h: long;
    step_w: float;
    step_h: float;
    clip: bool;
    flip: bool;
    offset: float;
}

table PReLUFusion {
    channel_shared: bool;
}

table Rank {
}

table Range {
    d_type: long = 0;
    start: long = 0;
    limit: long = 0;
    delta: long = 1;
}

table Reciprocal {
}

table RealDiv {
}

table ReduceFusion {
    keep_dims: bool;
    mode: ReduceMode;
    reduce_to_end: bool;
    coeff: float;
}

table Reshape {
}

table Resize {
    format: Format = 0;
    method: ResizeMethod;
    new_height: long;
    new_width: long;
    preserve_aspect_ratio: bool = false;
    coordinate_transform_mode: CoordinateTransformMode;
    cubic_coeff: float;
    exclude_outside: long;
    extrapolation_value: float;
    nearest_mode: NearestMode;
}

table ReverseSequence {
    seq_dim: long;
    batch_dim: long;
}

table ReverseV2 {
    axis: [long];
}

table Rfft {
    fft_length: long;
}

table ROIPooling {
    pooled_h: long;
    pooled_w: long;
    scale: float;
}

table Round {
}

table Rsqrt {
}

table QuantDTypeCast {
    src_t: long;
    dst_t: long;
    axis: long = 0;
}

table ScaleFusion {
    axis: long;
    activation_type: ActivationType = 0;
}

table ScatterNd {
}

table SGD {
    nesterov: bool;
    dampening: float;
    weight_decay: float;
}

table Shape {
}

table SigmoidCrossEntropyWithLogits {
}

table SigmoidCrossEntropyWithLogitsGrad {
}

table Sin {
}

table SkipGram {
    include_all_grams: bool;
    max_skip_size: long;
    ngram_size: long;
}

table SliceFusion {
    axes: [long];
}

table SmoothL1Loss {
    beta: float;
}

table SmoothL1LossGrad {
    beta: float;
}

table Softmax {
    axis: [long];
}

table SoftmaxCrossEntropyWithLogits {
}

table SpaceToBatch {
    block_size: [long];
    paddings: Vec2D;
}

table SpaceToBatchND {
    block_shape: [long];
    paddings: Vec2D;
}

table SpaceToDepth {
    block_size: long;
    format: Format;
}

table SparseSoftmaxCrossEntropyWithLogits {
    is_grad: bool;
}

table SparseToDense {
}

table Split {
    output_num: long;
    size_splits: [long];
    axis: long;
}

table Sqrt {
}

table Squeeze {
    axis: [long];
}

table Square {
}

table SquaredDifference {
}

table Stack {
    axis: long;
}

table StridedSlice {
    begin_mask: long;
    end_mask: long;
    ellipsis_mask: long;
    new_axis_mask: long;
    shrink_axis_mask: long;
}

table SubFusion {
    activation_type: ActivationType = 0;
}

table SubGrad {
}

table Switch {
}

table TensorListFromTensor {
    element_dtype: long;
    shape_type: long;
}

table TensorListGetItem {
    element_dtype: long;
}

table TensorListReserve {
    element_dtype: long;
    shape_type: long;
}

table TensorListSetItem {
    element_dtype: long;
}

table TensorListStack {
    num_elements: long;
    element_dtype: long;
}

table TileFusion {
    dims: [long];
}

table TopKFusion {
    sorted: bool = true;
    axis: long;
    largest: long;
}

table Transpose {
}

table Unique {
}

table UnsortedSegmentSum {
}

table Unsqueeze {
    axis: [long];
}

table Unstack {
    axis: long = 0;
}

table Where {
}

table ZerosLike {
}

table Select {
}

table GRU {
    bidirectional: bool = false;
    linear_before_reset: long = 0;
}

table NonZero {
}

table InvertPermutation {
}

table Size {
}

table RandomStandardNormal {
    seed: long;
    seed2: long;
}

table CropAndResize {
    method: ResizeMethod;
    extrapolation_value: float;
}

table Erf {
}

table StridedSliceGrad {
    begin_mask: long;
    end_mask: long;
    ellipsis_mask: long;
    new_axis_mask: long;
    shrink_axis_mask: long;
}

table IsFinite {
}

table LinSpace {
}

table UniformReal {
    seed: long;
    seed2: long;
}

table AbsGrad {
}

table RsqrtGrad {
}

table SqrtGrad {
}

table LayerNormGrad {
    begin_norm_axis: long;
    begin_params_axis: long;
}

table ResizeGrad {
    method: ResizeMethod;
    align_corners: bool;
}

table Splice {
    context: [long];
    forward_indexes: [long];
    output_dim: long;
}

table LogSoftmax {
    axis: long;
}

table Call {
    is_tail_call: bool = true;
}

table CumSum {
    exclusive: bool;
    reverse: bool;
}

table Custom {
    type: string;
    attr: [Attribute];
}

table SplitWithOverlap {
    split_dim: long;
    number_split: long;
    ratio: [long];
    extend_top: [long];
    extend_bottom: [long];
}

table GenOP {
    activation_type: ActivationType = 0;
    alpha: float;
    min_val: float;
    max_val: float;
    is_training: bool;
    format: Format = 0;
    kernel_size: [long];
    stride: [long];
    dilation: [long];
    pad_mode: PadMode;
    pad_list: [long];
    mode: long;
    group: long;
    in_channel: long;
    out_channel: long;
    eltwise_mode: EltwiseMode;
    has_bias: bool;
    use_axis: bool;
    axis: long;
    epsilon: float = 0.0001;
    momentum: float = 0.9;
    transpose_a: bool = false;
    transpose_b: bool = false;
    pad: [long];
    round_mode: RoundMode;
    global: bool;
    channel_shared: bool;
    axes: [long];
    keep_dims: bool;
    reduce_mode: ReduceMode;
    reduce_to_end: bool;
    coeff: float;
}

table RaggedRange {
}

table GLU {
    axis: long = -1;
}

table TensorArray {
    dynamic_size: bool = false;
    identical_element_shapes: bool = false;
    element_shape: [int];
    data_type: int;
}

table TensorArrayRead {
}

table TensorArrayWrite {
}

table Affine {
    context: [long];
    output_dim: long;
    activation_type: ActivationType = 0;
    transpose_a: bool = false;
    transpose_b: bool = false;
}

table ScatterNdUpdate {
}

table AllGather {
    group: string;
    rank_size: int;
}

table ReduceScatter {
    group: string;
    mode: ReduceMode;
    rank_size: int;
}

table DynamicQuant {
    symmetric: bool = false;
    dst_type: long = 32;
    activation_channel: bool = false;
    prefer_axis: long = 0;
    transpose: bool = false;
    prefer_axes: [int];
}

table LSTMGradData {
    bidirectional: bool;
    has_bias: bool;
    input_size: long;
    hidden_size: long;
    num_layers: long;
    num_directions: long;
    dropout: float;
    zoneout_cell: float = 0;
    zoneout_hidden: float = 0;
}

table LSTMGradWeight {
    bidirectional: bool;
    has_bias: bool;
    input_size: long;
    hidden_size: long;
    num_layers: long;
    num_directions: long;
    dropout: float;
    zoneout_cell: float = 0;
    zoneout_hidden: float = 0;
}

table RandomNormal {
    seed: float;
    mean: float;
    scale: float;
}

table NLLLoss {
    reduction: Reduction;
}

table NLLLossGrad {
    reduction: Reduction;
}

table FormatTranspose {
    src_format: Format = 1;
    dst_format: Format = 1;
}

table GatherD {
}

table GroupNormFusion {
    num_groups: long;
    epsilon: float = 1e-5;
    affine: bool = true;
}

table Log1p {
}

table TensorScatterAdd {
}

table SparseFillEmptyRows {
}

table SparseReshape {
}

table SparseSegmentSum {
}

table ScatterElements {
    axis: long;
}

table Triu {
}

table Tril {
}

table AdamWeightDecay {
    use_locking: bool;
}

table FillV2 {
}
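// Usage sketch (assumed file names, not part of the original schema): this schema and the
// included ops_types.fbs are compiled with the FlatBuffers compiler, for example
//   flatc --cpp ops_types.fbs ops.fbs
// which generates accessors under the mindspore::schema namespace declared above.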