/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This is the operation definition file for TensorFlow Lite.

#ifndef TFL_OPS
#define TFL_OPS

include "mlir/IR/OpBase.td"
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Interfaces/LoopLikeInterface.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "tensorflow/compiler/mlir/lite/ir/tfl_op_interfaces.td"
include "tensorflow/compiler/mlir/lite/ir/tfl_structs.td"
include "tensorflow/compiler/mlir/lite/quantization/quantization.td"

//===----------------------------------------------------------------------===//
// TFLite dialect string type - uses the TF string type as implementation
//===----------------------------------------------------------------------===//
def TFL_Str : Type<CPred<"$_self.isa<mlir::TF::StringType>()">,
                  "TFLite string type">,
             BuildableType<"getType<mlir::TF::StringType>()">;

//===----------------------------------------------------------------------===//
// TFLite dialect quint8 type - uses the TF quint8 type as implementation
//===----------------------------------------------------------------------===//
def TFL_Quint8 : Type<CPred<"$_self.isa<mlir::TF::Quint8Type>()">,
                    "TFLite quint8 type">,
              BuildableType<"getType<mlir::TF::Quint8Type>()">;

//===----------------------------------------------------------------------===//
// Activation function enum definitions.
//===----------------------------------------------------------------------===//

// Allowed activation function cases
// These should match the ActivationFunctionType enum in the TFLite schema.
def TFL_AF_None  : StrEnumAttrCase<"NONE">;
def TFL_AF_Relu  : StrEnumAttrCase<"RELU">;
def TFL_AF_Relu1 : StrEnumAttrCase<"RELU_N1_TO_1">;
def TFL_AF_Relu6 : StrEnumAttrCase<"RELU6">;
def TFL_AF_Tanh  : StrEnumAttrCase<"TANH">;
def TFL_AF_Sign  : StrEnumAttrCase<"SIGN_BIT">;

def TFL_AFAttr : StrEnumAttr<
    "ActivationFunctionType", "fused activation enum", [
      TFL_AF_None,  TFL_AF_Relu, TFL_AF_Relu1,
      TFL_AF_Relu6, TFL_AF_Tanh, TFL_AF_Sign
    ]>;

//===----------------------------------------------------------------------===//
// Padding enum definitions.
//===----------------------------------------------------------------------===//

// Allowed padding cases
// These should match the padding enum in the TFLite schema.
def TFL_PAD_Same  : StrEnumAttrCase<"SAME">;
def TFL_PAD_Valid : StrEnumAttrCase<"VALID">;
def TFL_MIRRORPAD_Reflect : StrEnumAttrCase<"REFLECT">;
def TFL_MIRRORPAD_Symmetric : StrEnumAttrCase<"SYMMETRIC">;

def TFL_PaddingAttr : StrEnumAttr<"Padding", "padding enum", [
      TFL_PAD_Same, TFL_PAD_Valid
    ]>;

def TFL_MirrorPaddingAttr : StrEnumAttr<"Padding", "Mirror pad enum", [
      TFL_MIRRORPAD_Reflect, TFL_MIRRORPAD_Symmetric
    ]>;

//===----------------------------------------------------------------------===//
// TensorType attribute definitions.
//===----------------------------------------------------------------------===//
// A type attribute containing the TensorType.
def TensorTypeAttr : TypeAttrBase<"TensorType", "Tensor type attribute">;

// A type attribute containing OpaqueElementsAttr and bytes.
def OpaqueBytesAttr : ElementsAttrBase<
  And<[
    CPred<"$_self.isa<OpaqueElementsAttr>()">,
    CPred<"$_self.cast<OpaqueElementsAttr>().getType()"
          ".getElementType().isInteger(8)">,
  ]>,
  "opaque bytes attribute"
 > {
  let storageType = [{ OpaqueElementsAttr }];
  let returnType = [{ OpaqueElementsAttr }];
  let convertFromStorage = "$_self";
}

//===----------------------------------------------------------------------===//
// Derived shape attribute class.
//===----------------------------------------------------------------------===//
class DerivedShapeAttr<code body> : DerivedAttr<"ArrayRef<int64_t>", body>;
class DerivedTFLiteTypeAttr<code body, code convert> :
  DerivedAttr<"tflite::TensorType", body, convert>;

// TFL Runtime op trait predicate.
class TFL_RuntimePredOpTrait<string desc, Pred pred> :
    GenInternalOpTrait<"TFLRuntimeOpTrait"> {
  Pred tflRuntimePredicate = pred;
  string tflRuntimeDescription = desc;
}

class TFL_OperandsHaveSameShapesOrBroadcastableShape<
    list<int> indices, int max_bcast_rank> :
  TFL_RuntimePredOpTrait<"operands do not have the same shape or "
      "broadcastable shapes within the rank " # max_bcast_rank,
    CPred<"TFL::VerifyOperandsHaveSameShapesOrBroadcastableShape("
            "$_op, llvm::ArrayRef<unsigned>({" # !interleave(indices, ", ") #
            "}), " # max_bcast_rank # ")">>;

// These additional types/type constraints are used to decouple the ops from
// runtime support for the ops. Prefer these types when defining new TFL ops,
// for uniformity.

// TFL Runtime type predicate.
class TFL_RuntimeType<TypeConstraint t> {
  Pred tflRuntimeTypePredicate = t.predicate;
  string tflRuntimeTypeDescription = t.summary;
}

class TFL_AnyTypeOf<list<Type> allowedRuntimeTypes, string description = "",
                    list<Type> allowedOpTypes = [AnyType]> :
  AnyTypeOf<allowedOpTypes, description>,
  TFL_RuntimeType<AnyTypeOf<allowedRuntimeTypes, description>>;

class TFL_TensorOf<list<Type> allowedRuntimeTypes,
                   list<Type> allowedOpTypes = [AnyType]> :
  TensorOf<allowedOpTypes>, TFL_RuntimeType<TensorOf<allowedRuntimeTypes>>;
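
// For example, TFL_TensorOf<[F32, I32]> is checked as a tensor of AnyType at
// the op level by default, while the runtime verification side requires an
// f32 or i32 element type.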

class TFL_TensorOfOrNone<list<Type> allowedRuntimeTypes, string description = "",
                         list<Type> allowedOpTypes = [AnyType]> :
  AnyTypeOf<[TFL_TensorOf<allowedOpTypes>, NoneType], description>,
  TFL_RuntimeType<AnyTypeOf<[TFL_TensorOf<allowedRuntimeTypes>, NoneType]>>;

class TFL_VariadicTensorOf<list<Type> allowedRuntimeTypes,
                   list<Type> allowedOpTypes = [AnyType]> :
  Variadic<TensorOf<allowedOpTypes>>,
  TFL_RuntimeType<Variadic<TensorOf<allowedRuntimeTypes>>>;

def TFL_Int32Or64 : SignlessIntOfWidths<[32, 64]>;

def TFL_BoolTensor : TFL_TensorOf<[I1]>;
def TFL_FpTensor : TFL_TensorOf<[F32]>;
def TFL_I32OrI64Tensor : TFL_TensorOf<[TFL_Int32Or64]>;
def TFL_I32Tensor : TFL_TensorOf<[I32]>;
def TFL_I64Tensor : TFL_TensorOf<[I64]>;
def TFL_Complex64Tensor : TFL_TensorOf<[Complex<F<32>>]>;

// TODO(jpienaar): Expand to all int types.
def TFL_IntTensor : TypeAlias<TFL_I32Tensor, "tensor of any integer type">;

class TFL_0DTensorOf<list<Type> allowedRuntimeTypes,
                     list<Type> allowedOpTypes = [AnyType]> :
  0DTensorOf<allowedOpTypes>, TFL_RuntimeType<TensorOf<allowedRuntimeTypes>>;
class TFL_1DTensorOf<list<Type> allowedRuntimeTypes,
                     list<Type> allowedOpTypes = [AnyType]> :
  1DTensorOf<allowedOpTypes>, TFL_RuntimeType<TensorOf<allowedRuntimeTypes>>;
class TFL_2DTensorOf<list<Type> allowedRuntimeTypes,
                     list<Type> allowedOpTypes = [AnyType]> :
  2DTensorOf<allowedOpTypes>, TFL_RuntimeType<TensorOf<allowedRuntimeTypes>>;

// This is used to represent the type of "ref tensors", i.e., tensors that are
// used as variables to track state.
def TFL_StatefulTensor : TypeAlias<AnyTensor, "stateful tensor">;

//===----------------------------------------------------------------------===//
// Rank/Shape helpers.
//===----------------------------------------------------------------------===//

// Returns true if operand is none type.
class TFL_OperandIsNoneType<int i> :
  CPred<"$_op.getOperand(" # i # ").getType().isa<NoneType>()">;

class TFL_OperandIsUnrankedPred<int n> :
  CPred<"$_op.getOperand(" # n # ").getType().isa<UnrankedTensorType>()">;

// TODO: Some of these could be generalized and/or moved to a more general
// location.
// Returns true if the n-th operand has unknown rank or has rank m.
class TFL_OperandHasRank<int n, int m> :
  PredOpTrait<"operand " # n # " is " # m # "-D",
    Or<[TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n #
      ").getType().cast<ShapedType>().getRank() == " # m>]>>;

// Returns true if the n-th operand is ranked and has rank dim.
class TFL_OperandHasKnownRank<int n, int dim> : And<[
  CPred<"$_op.getOperand(" # n # ").getType().isa<RankedTensorType>()">,
  CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>().getRank() == "
    # dim>]>;

// True if operand n is ranked and has a rank > dim.
class TFL_OperandIsRankedAndHasDimPred<int n, int dim> : And<[
  CPred<"$_op.getOperand(" # n # ").getType().isa<RankedTensorType>()">,
  CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>().getRank() > "
  # dim>]>;

// Returns true if the n-th operand is ranked and has a dimension length = size
// at the rank dim.
class TFL_OperandDimEquals<int n, int dim, int size> : And<[
  TFL_OperandIsRankedAndHasDimPred<n, dim>,
  CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>()"
      ".getShape()[" # dim # "] == " # size>]>;

// Returns true if the n-th operand is ranked and has a dimension length <=
// size at the rank dim.
class TFL_OperandDimIsAtMost<int n, int dim, int size> : And<[
  TFL_OperandIsRankedAndHasDimPred<n, dim>,
  CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>()"
      ".getShape()[" # dim # "] <= " # size>]>;

// Returns true if the n-th operand has unknown rank or at least rank m.
class TFL_OperandHasAtleastRank<int n, int m> :
  PredOpTrait<"operand " # n # " is at least " # m # "-D",
    Or<[CPred<"$_op.getOperand(" # n # ").getType().isa<UnrankedTensorType>()">,
      CPred<"$_op.getOperand(" # n #
        ").getType().cast<ShapedType>().getRank() >= " # m>]>>;

class TFL_OperandRankEquals1DimOfOperand<int x, int y> :
  PredOpTrait<"operand " # x # "'s rank equals operand " # y # "'s size",
    Or<[TFL_OperandIsUnrankedPred<x>,
        TFL_OperandIsUnrankedPred<y>,
        CPred<"!$_op.getOperand(" # y #
          ").getType().cast<ShapedType>().hasStaticShape()">,
        CPred<"$_op.getOperand(" # x #
          ").getType().cast<ShapedType>().getRank() == "
          "$_op.getOperand(" # y #
          ").getType().cast<ShapedType>().getShape()[0]">]>>;

class TFL_Operand0DOr1ElementTensor<int x> :
  PredOpTrait<"operand #" # x # " is a 0-d tensor or a 1-d tensor with 1 element",
    Or<[TFL_OperandHasKnownRank<x, 0>,
        And<[TFL_OperandHasKnownRank<x, 1>, TFL_OperandDimEquals<x, 0, 1>]>]>>;

// Returns true if the i-th dim of the x-th operand is the same as the j-th
// dim of the y-th operand, or either of those operands does not have a static
// shape.
class TFL_OperandsHaveSameDims<int x, int y, int i, int j> :
    Or<[TFL_OperandIsUnrankedPred<x>,
        TFL_OperandIsUnrankedPred<y>,
        CPred<"!$_op.getOperand(" # x #
          ").getType().cast<ShapedType>().hasStaticShape()">,
        CPred<"!$_op.getOperand(" # y #
          ").getType().cast<ShapedType>().hasStaticShape()">,
        CPred<"$_op.getOperand(" # x #
          ").getType().cast<ShapedType>().getShape()[" # i # "] == "
          "$_op.getOperand(" # y #
          ").getType().cast<ShapedType>().getShape()[" # j # "]">]>;

class TFL_OperandsHaveSameDimsTrait<int x, int y, int i, int j> :
  PredOpTrait<"dim " # i # " of operand " # x # " equals dim " # j #
    " of operand " # y,
    TFL_OperandsHaveSameDims<x, y, i, j>>;

// Returns true if the number of elements of the x-th operand equals the j-th
// dim of the y-th operand, or either of those operands does not have a static
// shape.
class TFL_NumElementsEqualsDim<int x, int y, int j> :
  Or<[TFL_OperandIsUnrankedPred<x>,
      TFL_OperandIsUnrankedPred<y>,
      CPred<"!$_op.getOperand(" # x #
        ").getType().cast<ShapedType>().hasStaticShape()">,
      CPred<"!$_op.getOperand(" # y #
        ").getType().cast<ShapedType>().hasStaticShape()">,
      CPred<"$_op.getOperand(" # x #
        ").getType().cast<ShapedType>().getNumElements() == "
        "$_op.getOperand(" # y #
        ").getType().cast<ShapedType>().getShape()[" # j # "]">]>;

class TFL_NumElementsEqualsDimTrait<int x, int y, int j> :
  PredOpTrait<"operand " # x # " has number of elements equal to dim " # j #
    " of operand " # y,
    TFL_NumElementsEqualsDim<x, y, j>>;

// tf.uint8 and tf.quint8 are mapped to the same tflite types, so they are
// equal when used as element types.
class TFL_TFTypesWithSameBits<int i, int j, int num> :
  And<[
    Or<[CPred<"getElementTypeOrSelf($_op.getResult(" # i # ")).isa<mlir::TF::Quint" # num # "Type>()">,
        CPred<"getElementTypeOrSelf($_op.getResult(" # i # ")).isUnsignedInteger(" # num # ")">]>,
    Or<[CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isa<mlir::TF::Quint" # num # "Type>()">,
        CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isUnsignedInteger(" # num # ")">]>]>;

class TFL_TFOperandTypesWithSameBits<int i, int j, int num> :
  And<[
    Or<[CPred<"getElementTypeOrSelf($_op.getOperand(" # i # ")).isa<mlir::TF::Quint" # num # "Type>()">,
        CPred<"getElementTypeOrSelf($_op.getOperand(" # i # ")).isUnsignedInteger(" # num # ")">]>,
    Or<[CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isa<mlir::TF::Quint" # num # "Type>()">,
        CPred<"getElementTypeOrSelf($_op.getOperand(" # j # ")).isUnsignedInteger(" # num # ")">]>]>;

class TFL_OperandIsNoneOrHasRank<int n, int m> :
  PredOpTrait<"operand " # n # " is " # m # "-D",
    Or<[
      TFL_OperandIsNoneType<n>,
      TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n #
      ").getType().cast<ShapedType>().getRank() == " # m>]>>;

class TFL_OperandIsNoneOrHasRankAtMost<int n, int m> :
  PredOpTrait<"operand " # n # " is at most " # m # "-D",
    Or<[
      TFL_OperandIsNoneType<n>,
      TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n #
      ").getType().cast<ShapedType>().getRank() <= " # m>]>>;

class TFL_OperandHasRankAtMost<int n, int m> :
  PredOpTrait<"operand " # n # " is at most " # m # "-D",
    Or<[TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n #
      ").getType().cast<ShapedType>().getRank() <= " # m>]>>;

class TFL_OperandHasRankAtLeast<int n, int m> :
  PredOpTrait<"operand " # n # " is at least " # m # "-D",
    Or<[TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n #
      ").getType().cast<ShapedType>().getRank() >= " # m>]>>;

class TFL_OperandHasRankRange<int n, int x, int y> :
  PredOpTrait<"operand " # n # " has rank range [" # x # ", " # y # "]",
    Or<[TFL_OperandIsUnrankedPred<n>,
      CPred<"$_op.getOperand(" # n # ").getType().cast<ShapedType>().getRank() "
      ">= " # x # " && $_op.getOperand(" # n # ").getType().cast<ShapedType>()."
      "getRank() <= " # y>]>>;

def TFL_FloatNonNegative : AttrConstraint<
    CPred<"$_self.isa<FloatAttr>() && "
            "!$_self.cast<FloatAttr>().getValue().isNegative()">,
    "whose value is non-negative">;

def TFL_BoolTrue : AttrConstraint<
    CPred<"$_self.isa<BoolAttr>() && $_self.cast<BoolAttr>().getValue()">,
    "whose value is true">;

def TFL_BoolFalse : AttrConstraint<
    CPred<"$_self.isa<BoolAttr>() && !$_self.cast<BoolAttr>().getValue()">,
    "whose value is false">;

class TFL_StringEqualsTo<string value> : AttrConstraint<
    CPred<"$_self.cast<StringAttr>().getValue() == \"" # value # "\"">,
    "whose value equals '" # value # "'">;

// Ensures the array attribute's size is within the given maximum size.
class TFL_ArrayMaxCount<int n> : AttrConstraint<
    CPred<"$_self.isa<ArrayAttr>() && $_self.cast<ArrayAttr>().size() <= " # n>,
    "whose size is at most " # n>;

// Ensures the given integer attribute has the given value.
class TFL_IntEqualsTo<int n> : AttrConstraint<
    CPred<"$_self.isa<IntegerAttr>() && "
            "$_self.cast<IntegerAttr>().getInt() == " # n>,
    "whose value is " # n>;

// This is a quantization-aware version of TCresVTEtIsSameAsOp.
class TFL_TCresVTEtIsSameAsOp<int i, int j> : And<[
  TCOpResIsShapedTypePred<i, j>,
  Or<[
    TCresVTEtIsSameAsOpBase<i, j>,
    TFL_TFTypesWithSameBits<i, j, 8>,
    And<[
      SubstLeaves<"$_self", "getElementTypeOrSelf($_op.getOperand(" # j # "))",
        quant_QuantizedType.predicate>,
      CPred<"quant::QuantizedType::castToStorageType("
                "getElementTypeOrSelf($_op.getResult(" # i # "))) == "
            "quant::QuantizedType::castToStorageType("
                "getElementTypeOrSelf($_op.getOperand(" # j # ")))">]>]>]>;

def TFL_SameFirstOperandAndFirstResultElementType :
  PredOpTrait<"values and output must have same element type",
              TFL_TCresVTEtIsSameAsOp<0, 0>>;

// This is a quantization-aware version of TCopVTEtAreSameAt.
class TFL_TCopVTEtAreSameAt<int i, int j, int num=8> : Or<[
  TCopVTEtAreSameAt<[i, j]>,
  TFL_TFOperandTypesWithSameBits<i, j, num>,
  And<[
    SubstLeaves<"$_self", "getElementTypeOrSelf($_op.getOperand(" # j # "))",
      quant_QuantizedType.predicate>,
    CPred<"quant::QuantizedType::castToStorageType("
              "getElementTypeOrSelf($_op.getOperand(" # i # "))) == "
          "quant::QuantizedType::castToStorageType("
              "getElementTypeOrSelf($_op.getOperand(" # j # ")))">]>]>;

//===----------------------------------------------------------------------===//
// TFL op common constraints.
//===----------------------------------------------------------------------===//

class OperandsSameElementTypeConstraintBase<string op> :
  PredOpTrait<op # " operands have same element type",
    Or<[
      TCopVTEtIsSameAs<0, 1>,
      // Two operands' values are both quantized and their types have the same
      // underlying storage type.
      And<[
        SubstLeaves<"$_self", "getElementTypeOrSelf($_op.getOperand(0))",
          quant_QuantizedType.predicate>,
        CPred<"quant::QuantizedType::castToStorageType("
                  "getElementTypeOrSelf($_op.getOperand(0))) == "
              "quant::QuantizedType::castToStorageType("
                  "getElementTypeOrSelf($_op.getOperand(1)))">]>]>>;

// This is a constraint for most of the binary ops, e.g., add, mul, div, etc.
// A binary op's lhs and rhs should have the same value type; the constraint
// can compare quantized types as well.
def BinaryOpSameElementTypeConstraint :
  OperandsSameElementTypeConstraintBase<"binary op">;

// This is a constraint for most of the comparison ops, e.g., equal, not_equal,
// greater, greater_equal, less, etc. A comparison op's lhs and rhs should have
// the same value type; the constraint can compare quantized types as well.
def ComparisonOpSameElementTypeConstraint :
  OperandsSameElementTypeConstraintBase<"comparison op">;

//===----------------------------------------------------------------------===//
// TFL common builders.
//===----------------------------------------------------------------------===//

def TFL_BroadcastableBinaryBuilder :
  OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs),
  [{
    auto resultType =
      OpTrait::util::getBroadcastedType(lhs.getType(), rhs.getType());
    if (!resultType)
      mlir::emitError($_state.location, "non-broadcastable operands");
    $_state.addOperands({lhs, rhs});
    $_state.types.push_back(resultType);
  }]>;

def TFL_FusedBroadcastableBinaryBuilder :
  OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs,
    "StringAttr":$fusedActivationFunction),
  [{
    buildFusedBroadcastableBinOp(
       &$_builder, $_state, lhs, rhs, fusedActivationFunction);
  }]>;

def TFL_ComparisonBinaryBuilder :
  OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs),
  [{
    buildComparisonBinOp(&$_builder, $_state, lhs, rhs);
  }]>;

//===----------------------------------------------------------------------===//
// TFL op base class.
//===----------------------------------------------------------------------===//

class TFL_Op<string mnemonic, list<OpTrait> traits = []> :
    Op<TFL_Dialect, mnemonic, !listconcat(traits,
      [DeclareOpInterfaceMethods<TFL_RuntimeVerification>])> {
  // FlatBuffer generation specific information.
  // -------------------------------------------
  // When generating the FlatBuffer output, some operations have
  // Options (as defined in the schema). These options are effectively
  // the attributes of the operations (e.g., what padding is to be used
  // for a pooling operator). Not all operations have Options and some
  // operations share Options. The following attributes indicate whether
  // the operation has Options in the serialized FlatBuffer.

  // Whether the TFLite operator has options in the schema representation.
  bit hasOptions = 0b0;

  // Used to specify a custom options type for TFLite operators where
  // the options' name does not match the TFLite operator's name.
  // If no customOption is specified then <name>Options is used if the op
  // hasOptions.
  string customOption = ?;
}

class TFL_ConvOp<string mnemonic, string opSummary, int index> :
    TFL_Op<mnemonic, [NoSideEffect, AccumulatorUniformScale<2, 0, 1>,
    AffineQuantizedOpInterface, AffineOpCoefficient<index, 1>, TFL_SparseOp]> {
  let summary = opSummary # " operator";

  let description = [{
    Performs a convolution operation on the inputs.

    Inputs:
      `inputs[0]`: required: the input activation tensor
      `inputs[1]`: required: the filter weight tensor
      `inputs[2]`: optional: the bias tensor
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$input,
    TFL_TensorOf<[F32, QI8, QUI8]>:$filter,
    TFL_TensorOfOrNone<[F32, I32, I64]>:$bias,
    I32Attr:$dilation_h_factor,
    I32Attr:$dilation_w_factor,
    TFL_AFAttr:$fused_activation_function,
    TFL_PaddingAttr:$padding,
    I32Attr:$stride_h,
    I32Attr:$stride_w
  );

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$output);

  let hasOptions = 0b1;
}


//===----------------------------------------------------------------------===//
// TFL op definitions.
//===----------------------------------------------------------------------===//
def TFL_AbsOp : TFL_Op<"abs", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType,
    SameOperandsAndResultsScale,
    NoQuantizableResult]> {
  let summary = "Absolute value operator";

  let description = [{
Given a tensor `x`, this operation returns a tensor containing the absolute
value of each element in `x`. For example, if x is an input element and y is
an output element, this operation computes \\(y = |x|\\).
  }];

  let arguments = (ins TFL_TensorOf<[F32, QI8, QI16]>:$x);

  let results = (outs TFL_TensorOf<[F32, QI8, QI16]>:$y);

  let hasFolder = 1;
}
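
// Illustrative use in MLIR assembly (a sketch; the shape below is an
// assumption for the example, not taken from this file):
//
//   %y = "tfl.abs"(%x) : (tensor<4xf32>) -> tensor<4xf32>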

def TFL_AddOp : TFL_Op<"add", [
    TFL_RuntimePredOpTrait<"Operands do not have valid shapes",
      CPred<"TFL::VerifyAddOpShapeConstraints(llvm::cast<AddOp>($_op))">>,
    ResultsBroadcastableShape,
    NoSideEffect,
    Commutative]> {
  let summary = "Addition operator";

  let description = [{
    Element-wise addition operation.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, QI8, QUI8, QI16]>:$lhs,
    TFL_TensorOf<[F32, I32, QI8, QUI8, QI16]>:$rhs,
    TFL_AFAttr:$fused_activation_function);

  let results = (outs TFL_TensorOf<[F32, I32, QI8, QUI8, QI16]>:$output);

  let hasFolder = 1;

  let builders = [TFL_FusedBroadcastableBinaryBuilder];

  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];

  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];

  let hasOptions = 1;
}
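
// Illustrative use with the one-result, same-operand-type syntax declared
// above (a sketch; shape and attribute value are assumptions for the example):
//
//   %sum = tfl.add %lhs, %rhs {fused_activation_function = "NONE"} : tensor<4xf32>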

def TFL_AddNOp : TFL_Op<"add_n", [
    Commutative,
    NoSideEffect,
    SameOperandsAndResultsScale,
    NoQuantizableResult]> {
  let summary = "add_n operator";

  let description = [{
    Adds all input tensors element-wise.
  }];

  let arguments = (ins
    TFL_VariadicTensorOf<[F32, I32]>:$inputs
  );

  let results = (outs
    TFL_TensorOf<[F32, I32]>:$sum
  );
}

def TFL_ReduceAnyOp : TFL_Op<"reduce_any", [
    NoSideEffect,
    NoQuantizableResult]> {
  let summary = [{
Computes the "logical or" of elements across dimensions of a tensor.
  }];

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
  }];

  let arguments = (ins
    TFL_BoolTensor:$input,
    TFL_I32Tensor:$reduction_indices,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    TFL_BoolTensor:$output
  );

  let hasOptions = 1;
  let customOption = "ReducerOptions";
}
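
// Illustrative use (a sketch; shapes are assumptions for the example):
// reducing a 2x3 boolean tensor along axis 1 without keep_dims drops that
// dimension:
//
//   %0 = "tfl.reduce_any"(%input, %axes) {keep_dims = false}
//       : (tensor<2x3xi1>, tensor<1xi32>) -> tensor<2xi1>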

def TFL_TransposeConvOp: TFL_Op<"transpose_conv", [
    NoSideEffect,
    TFL_OperandHasRank<0, 1>,
    TFL_OperandHasRank<1, 4>,
    TFL_OperandHasRank<2, 4>,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 2>>,
    AccumulatorUniformScale<3, 1, 2>,
    AffineQuantizedOpInterface, AffineOpCoefficient<0, 1>,
    TFL_SparseOp]> {
  let summary = "Transpose convolution operator";

  let description = [{
    Performs a transpose convolution operation on the input.
  }];

  let arguments = (ins
    TFL_I32Tensor:$output_shape,
    TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$weights,
    TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$input,
    TFL_TensorOfOrNone<[F32, QI32, I64]>:$bias,
    TFL_PaddingAttr:$padding,
    Confined<I32Attr, [IntPositive]>:$stride_h,
    Confined<I32Attr, [IntPositive]>:$stride_w
  );

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$output);

  let hasOptions = 1;

  let verifier = [{ return Verify(*this); }];

  let extraClassDeclaration = [{
    // AffineQuantizedOpInterface:
    int GetChannelDimIndex() { return 0; }
    int GetQuantizationDimIndex() { return 0; }
    // SparseOpInterface:
    std::vector<int> GetSparseOperands() { return {1}; }
    std::vector<std::vector<int>> GetFloatBlockSize() { return {}; }
    std::vector<std::vector<int>> GetQuantizedBlockSize() { return {}; }
  }];
}

def TFL_AveragePool2DOp:
    TFL_Op<"average_pool_2d",
           [NoSideEffect,
            SameOperandsAndResultsScale]> {
  let summary = "Average_pool_2d operator";

  let description = [{
    Performs an average-pooling operation on the input.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$input,
    I32Attr:$filter_height,
    I32Attr:$filter_width,
    TFL_PaddingAttr:$padding,
    I32Attr:$stride_h,
    I32Attr:$stride_w,
    TFL_AFAttr:$fused_activation_function
  );

  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$output);

  let hasOptions = 1;
  let customOption = "Pool2DOptions";
}

def TFL_ArgMaxOp : TFL_Op<"arg_max", [NoSideEffect]> {
  let summary = "ArgMax operator";

  let description = [{
    Returns the index with the largest value across dimensions of a tensor.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, I8, UI8, QI8, QUI8]>:$input,
    TFL_I32OrI64Tensor:$dim
  );

  let results = (outs
    TFL_I32OrI64Tensor:$output
  );

  let hasOptions = 1;

  DerivedTFLiteTypeAttr output_type = DerivedTFLiteTypeAttr<[{
    return getResult().getType().cast<TensorType>().getElementType().
        cast<IntegerType>().getWidth() > 32 ? tflite::TensorType_INT64 :
            tflite::TensorType_INT32;
    }], [{
      TypeAttr::get(getResult().getType().cast<TensorType>().getElementType())
    }]>;
}
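
// Illustrative use (a sketch; shapes and the axis value are assumptions for
// the example): taking the arg max over axis 1 of a 3x4 input removes that
// dimension:
//
//   %0 = "tfl.arg_max"(%input, %dim)
//       : (tensor<3x4xf32>, tensor<i32>) -> tensor<3xi32>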

def TFL_ArgMinOp : TFL_Op<"arg_min", [NoSideEffect]> {
  let summary = "ArgMin operator";

  let description = [{
    Returns the index with the smallest value across dimensions of a tensor.
    For example:
      a = [1, 10, 26.9, 2.8, 166.32, 62.3]
      b = tf.math.argmin(input = a)
      c = tf.keras.backend.eval(b)
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, I8, UI8, QI8, QUI8]>:$input,
    TFL_I32OrI64Tensor:$dim
  );

  let results = (outs
    TFL_I32OrI64Tensor:$output
  );

  let hasOptions = 1;

  DerivedTFLiteTypeAttr output_type = DerivedTFLiteTypeAttr<[{
    return getResult().getType().cast<TensorType>().getElementType().
        cast<IntegerType>().getWidth() > 32 ? tflite::TensorType_INT64 :
            tflite::TensorType_INT32;
    }], [{
      TypeAttr::get(getResult().getType().cast<TensorType>().getElementType())
    }]>;
}

def TFL_CeilOp: TFL_Op<"ceil", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType,
    NoQuantizableResult]> {
  let summary = "Ceil operator";

  let description = [{
    Returns the element-wise ceiling value of the input.
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);
}

def TFL_ConcatenationOp : TFL_Op<"concatenation",
  [
    NoSideEffect,
    TFL_SameFirstOperandAndFirstResultElementType,
    SameOperandsAndResultsScale
  ]> {
  let summary = "Concatenation operator";

  let description = [{
    Concatenates tensors along one dimension.
  }];

  let arguments = (
    ins TFL_VariadicTensorOf<
      [F32, I64, I32, I16, I8, QI8, QUI8, UI8, I1]>:$values,
    I32Attr:$axis,
    TFL_AFAttr:$fused_activation_function
  );

  let results = (outs
    TFL_TensorOf<
      [F32, I64, I32, I16, I8, QI8, QUI8, UI8, I1]>:$output
  );

  let hasOptions = 1;

  let hasFolder = 1;

  let verifier = [{ return Verify(*this); }];

  let extraClassDeclaration = [{
    // SameScalesOpInterface:
    bool RequiredSameOperandsAndResultsScale(bool sign, int bit_width) {
      // uint8 doesn't require same operands and results scales.
      bool is_uint8 = !sign && (bit_width == 8);
      return !is_uint8;
    }
  }];
}
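
// Illustrative use (a sketch; shapes are assumptions for the example):
// concatenating along axis 0 sums the sizes of that dimension:
//
//   %0 = "tfl.concatenation"(%a, %b)
//       {axis = 0 : i32, fused_activation_function = "NONE"}
//       : (tensor<1x4xf32>, tensor<3x4xf32>) -> tensor<4x4xf32>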

def TFL_ConstOp : Op<TFL_Dialect, "pseudo_const", [ConstantLike, NoSideEffect,
    FirstAttrDerivedResultType]> {
  let summary = "Constant pseudo op.";

  let description = [{
    Represents a constant value in the TensorFlow Lite dialect. This is not an
    actual operation; it will be lowered to a buffer instead.

    The op is allowed to have all the same type of attributes as tf.Const does
    (e.g., opaque TF attributes are allowed).
  }];

  let arguments = (ins ElementsAttr:$value);

  let results = (outs AnyTensor:$output);

  let hasFolder = 1;

  let builders = [
    OpBuilderDAG<(ins "Attribute":$value),
    [{
      $_state.addAttribute("value", value);
      $_state.addTypes(value.getType());
    }]>
  ];
}
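
// Illustrative use (a sketch; the value and shape are assumptions for the
// example):
//
//   %cst = "tfl.pseudo_const"() {value = dense<1.0> : tensor<2xf32>}
//       : () -> tensor<2xf32>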

def TFL_SparseConstOp : Op<TFL_Dialect, "pseudo_sparse_const", [
    NoSideEffect,
    FirstAttrDerivedResultType]> {
  let summary = "Sparse constant pseudo op.";

  let description = [{
    Represents a sparse constant value in the TensorFlow Lite dialect. This is
    not an actual operation; it will be lowered to a buffer instead.
  }];

  let arguments = (ins ElementsAttr:$value,
                   SparsityParameterAttr:$s_param,
                   ElementsAttr:$compressed_data);

  let results = (outs AnyTensor:$output);

  let builders = [
    OpBuilderDAG<(ins "Attribute":$value, "SparsityParameterAttr":$s_param,
      "Attribute":$compressed_data),
    [{
      $_state.addTypes(value.getType());
      $_state.addAttribute("value", value);
      $_state.addAttribute("s_param", s_param);
      $_state.addAttribute("compressed_data", compressed_data);
    }]>
  ];
}

def TFL_ExternalConstOp : Op<TFL_Dialect, "external_const", [NoSideEffect]> {
  let summary = "External const op.";

  let description = [{
    External const op holds a `buffer_index` which points to a constant
    in the flatbuffer.
  }];

  let arguments = (ins I32Attr:$buffer_index);

  let results = (outs AnyTensor:$output);
}

def TFL_Conv2DOp : TFL_ConvOp<"conv_2d", "Convolution", 0> {
  let hasCanonicalizer = 1;

  let extraClassDeclaration = [{
    // AffineQuantizedOpInterface:
    int GetChannelDimIndex() { return 0; }
    int GetQuantizationDimIndex() { return 0; }
    // SparseOpInterface:
    std::vector<int> GetSparseOperands() { return {1}; }
    std::vector<std::vector<int>> GetFloatBlockSize() { return {}; }
    std::vector<std::vector<int>> GetQuantizedBlockSize() { return {}; }
  }];
}
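
// Illustrative use (a sketch; all shapes and attribute values are assumptions
// for the example, with the filter assumed to be laid out as
// [output_channels, height, width, input_channels]):
//
//   %0 = "tfl.conv_2d"(%input, %filter, %bias) {
//       dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32,
//       fused_activation_function = "NONE", padding = "SAME",
//       stride_h = 2 : i32, stride_w = 2 : i32}
//       : (tensor<1x224x224x3xf32>, tensor<32x3x3x3xf32>, tensor<32xf32>)
//       -> tensor<1x112x112x32xf32>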

def TFL_CosOp: TFL_Op<"cos", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType,
    NoQuantizableResult]> {
  let summary = "Cosine operator";

  let description = [{
    Computes the element-wise cosine of the input.
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);

  let hasFolder = 1;
}

def TFL_CumsumOp: TFL_Op<"cumsum", [
    NoSideEffect,
    PredOpTrait<"input and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoQuantizableResult,
    TFL_OperandHasRank<1, 0>]> {
  let summary = "Cumsum operator";

  let description = [{
    Computes the cumulative sum of the tensor `input` along `axis`.
  }];

  let arguments = (
    ins TFL_TensorOf<[F32, I32, I64]>:$input,
    TFL_I32Tensor:$axis,
    DefaultValuedAttr<BoolAttr, "false">:$exclusive,
    DefaultValuedAttr<BoolAttr, "false">:$reverse
  );

  let results = (outs TFL_TensorOf<[F32, I32, I64]>:$output);

  let hasOptions = 1;
}

def TFL_DepthwiseConv2DOp :
    TFL_ConvOp<"depthwise_conv_2d", "Depthwise-separable convolution", 3> {
  let arguments = (
    ins TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$input,
    TFL_TensorOf<[F32, QI8, QUI8]>:$filter,
    TFL_TensorOfOrNone<[F32, I32, I64]>:$bias,
    I32Attr:$dilation_h_factor,
    I32Attr:$dilation_w_factor,
    TFL_AFAttr:$fused_activation_function,
    TFL_PaddingAttr:$padding,
    I32Attr:$stride_h,
    I32Attr:$stride_w,
    I32Attr:$depth_multiplier
  );

  let hasCanonicalizer = 1;

  let extraClassDeclaration = [{
    // AffineQuantizedOpInterface:
    int GetChannelDimIndex() { return 3; }
    int GetQuantizationDimIndex() { return 3; }
    // SparseOpInterface:
    std::vector<int> GetSparseOperands() { return {1}; }
    std::vector<std::vector<int>> GetFloatBlockSize() { return {}; }
    std::vector<std::vector<int>> GetQuantizedBlockSize() { return {}; }
  }];
}

def TFL_FCWO_Default  : StrEnumAttrCase<"DEFAULT">;
def TFL_FCWO_Shuffled4x16i8  : StrEnumAttrCase<"SHUFFLED4x16INT8">;

def TFL_FullyConnectedOptionsWeightFormatAttr :
    StrEnumAttr<"FullyConnectedOptionsWeightsFormat",
                "fully connected options weights format", [
      TFL_FCWO_Default, TFL_FCWO_Shuffled4x16i8
    ]>;

// TODO(jpienaar): Update post discussion on semantics of FC OP.
def TFL_FullyConnectedOp : TFL_Op<"fully_connected", [
    NoSideEffect, AccumulatorUniformScale<2, 0, 1>,
    AffineQuantizedOpInterface,
    AffineOpCoefficient<-1, 1>,
    TFL_SparseOp]> {
  let summary = "Fully connected op";

  let arguments = (ins
    TFL_TensorOf<[F32, QI8, QUI8, QI16, QUI16]>:$input,
    TFL_TensorOf<[F32, QI8, QUI8, QI16]>:$filter,
    TFL_TensorOfOrNone<[F32, QI32, QUI32]>:$bias,

    TFL_AFAttr:$fused_activation_function,
    TFL_FullyConnectedOptionsWeightFormatAttr:$weights_format,
    BoolAttr:$keep_num_dims
  );

  // Depending on the weights format, this op can have one or two outputs.
  let results = (outs
    TFL_VariadicTensorOf<[F32, QI8, QUI8, QI16, QUI16]>:$output
  );

  let verifier = [{ return Verify(*this); }];

  let hasOptions = 1;

  let hasCanonicalizer = 1;

  let hasFolder = 1;

  let extraClassDeclaration = [{
    // AffineQuantizedOpInterface:
    int GetChannelDimIndex() { return 0; }
    int GetQuantizationDimIndex() { return -1; }
    // SparseOpInterface:
    std::vector<int> GetSparseOperands() { return {1}; }
    std::vector<std::vector<int>> GetFloatBlockSize() { return {{1, 4}}; }
    std::vector<std::vector<int>> GetQuantizedBlockSize() { return {{1, 16}}; }
  }];
}

def TFL_BatchMatMulOp : TFL_Op<"batch_matmul", [
   NoSideEffect,
   TFL_OperandHasAtleastRank<0, 2>,
   TFL_OperandHasAtleastRank<1, 2>,
   PredOpTrait<"x and output must have same element type",
       TFL_TCresVTEtIsSameAsOp<0, 0>>,
   PredOpTrait<"y and output must have same element type",
       TFL_TCresVTEtIsSameAsOp<0, 1>>]> {

  let summary = "Batch Matrix Multiply Operator";

  let description = [{
Performs a batched matrix multiplication on the inputs. Follows the
conventions of TensorFlow BatchMatMulV2, with support for unknown dimensions
in the batch dimensions and broadcasting.

    Inputs:
      `inputs[0]`: required: input LHS
      `inputs[1]`: required: input RHS
      `adj_x`: optional: transpose LHS (default false)
      `adj_y`: optional: transpose RHS (default false)
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, QI8, QI16]>:$x,
    TFL_TensorOf<[F32, QI8, QI16]>:$y,
    DefaultValuedAttr<BoolAttr, "false">:$adj_x,
    DefaultValuedAttr<BoolAttr, "false">:$adj_y
  );

  let results = (outs
    TFL_TensorOf<[F32, QI8, QI16]>:$output
  );

  let hasOptions = 1;
}
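
// Illustrative use (a sketch; shapes are assumptions for the example):
//
//   %0 = "tfl.batch_matmul"(%x, %y) {adj_x = false, adj_y = false}
//       : (tensor<2x3x4xf32>, tensor<2x4x5xf32>) -> tensor<2x3x5xf32>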

def TFL_GatherOp : TFL_Op<"gather", [
    NoSideEffect,
    SameOperandsAndResultsScale,
    TFL_OperandHasAtleastRank<0, 1>,
    PredOpTrait<"params and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>
  ]> {
  let summary = "Gather operator";

  let description = [{
    Gather slices from `params` axis `axis` according to `indices`.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I1, I8, I32, I64, TFL_Str, UI8, QI8, QUI8, QI16]>:$params,
    TFL_TensorOf<[I32, I64]>:$indices,
    I32Attr:$axis
  );

  let builders =
  [
    OpBuilderDAG<(ins "Value":$params, "Value":$indices, "IntegerAttr":$axis),
    [{ BuildGatherOp(&$_builder, $_state, params, indices, axis); }]>
  ];

  let results = (outs
    TFL_TensorOf<[F32, I1, I8, I32, I64, TFL_Str, UI8, QI8, QUI8, QI16]>:$output
  );

  let hasOptions = 1;
}
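
// Illustrative use (a sketch; shapes are assumptions for the example):
// gathering two rows of a 4x3 matrix:
//
//   %0 = "tfl.gather"(%params, %indices) {axis = 0 : i32}
//       : (tensor<4x3xf32>, tensor<2xi32>) -> tensor<2x3xf32>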

def TFL_GatherNdOp : TFL_Op<"gather_nd", [
    NoSideEffect,
    PredOpTrait<"params and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "Gather_nd operator";

  let description = [{
    Gather slices from `params` into a Tensor with shape specified by `indices`.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I8, I16, I64, I32, UI8, TFL_Str]>:$params,
    TFL_I32OrI64Tensor:$indices
  );

  let results = (outs
    TFL_TensorOf<[F32, I8, I16, I64, I32, UI8, TFL_Str]>:$output
  );
}

def TFL_ScatterNdOp : TFL_Op<"scatter_nd", [
    NoSideEffect,
    TFL_OperandHasAtleastRank<0, 1>,
    TFL_OperandHasAtleastRank<1, 1>,
    PredOpTrait<"updates and output must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 1>>
  ]> {
  let summary = "Scatter_nd operator";

  let description = [{
    Scatter `updates` into a new tensor according to `indices`.
  }];

  let arguments = (ins
    TFL_TensorOf<[I32]>:$indices,
    TFL_TensorOf<[F32, I8, I64, I32, UI8]>:$updates,
    TFL_1DTensorOf<[I32]>:$shape
  );

  let results = (outs
    TFL_TensorOf<[F32, I8, I64, I32, UI8]>:$output
  );

  let verifier = [{ return Verify(*this); }];

  let hasOptions = 1;
}

// The check that lhs and rhs have compatible types is handled by the
// ResultsBroadcastableShape trait.
def TFL_LessEqualOp : TFL_Op<"less_equal", [
    ResultsBroadcastableShape,
    ComparisonOpSameElementTypeConstraint,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    NoSideEffect]> {
  let summary = "Less_equal operator";

  let description = [{
    Element-wise less_equal operation.
  }];

  let arguments = (
      ins TFL_TensorOf<[F32, I32, I64, QI8, QUI8]>:$lhs,
      TFL_TensorOf<[F32, I32, I64, QI8, QUI8]>:$rhs);

  let results = (outs TFL_BoolTensor:$output);

  let builders = [TFL_ComparisonBinaryBuilder];

  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];

  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];

  let hasOptions = 0;
}

def TFL_LocalResponseNormalizationOp : TFL_Op<"local_response_normalization", [
    TFL_OperandHasRank<0, 4>,
    SameOperandsAndResultShape,
    SameOperandsAndResultType,
    NoSideEffect,
    NoQuantizableResult]> {
  let summary = "Local Response Normalization.";

  let description = [{
The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
dimension), and each vector is normalized independently.  Within a given vector,
each component is divided by the weighted, squared sum of inputs within
`depth_radius`.  In detail,

    sqr_sum[a, b, c, d] =
        sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
    output = input / (bias + alpha * sqr_sum) ** beta

For details, see [Krizhevsky et al., ImageNet classification with deep
convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
  }];

  let arguments = (ins
      TFL_FpTensor:$input,
      I32Attr:$radius,
      F32Attr:$bias,
      F32Attr:$alpha,
      F32Attr:$beta
  );

  let results = (outs
    TFL_FpTensor:$output
  );

  let hasOptions = 1;
}

def TFL_GreaterEqualOp : TFL_Op<"greater_equal", [
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    ResultsBroadcastableShape,
    ComparisonOpSameElementTypeConstraint,
    NoSideEffect]> {
  let summary = "Greater_equal operator";

  let description = [{
    Element-wise greater_equal operation.
  }];

  let arguments = (
      ins TFL_TensorOf<[F32, I32, I64, QUI8, QI8]>:$lhs,
      TFL_TensorOf<[F32, I32, I64, QUI8, QI8]>:$rhs);

  let results = (outs TFL_BoolTensor:$output);

  let builders = [TFL_ComparisonBinaryBuilder];

  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];

  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];

  let hasOptions = 0;
}

def TFL_MatrixDiagOp : TFL_Op<"matrix_diag", [
  NoSideEffect,
  TFL_OperandHasAtleastRank<0, 1>,
  PredOpTrait<"operand and result must have the same element type",
    TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = [{
    Returns a tensor with the provided diagonal and everything else padded with zeros.
  }];

  let description = [{
    Given a diagonal, returns a tensor with the diagonal and everything else padded with zeros.
    Assume the diagonal has k dimensions `[I, J, K, ..., N]`; then the output is a tensor of
    rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
       `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n].`
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QUI8, QI8, TFL_Quint8]>:$diagonal
  );

  let results = (outs
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QUI8, QI8, TFL_Quint8]>:$output
  );

  let hasOptions = 0;
}

def TFL_MatrixSetDiagOp : TFL_Op<"matrix_set_diag", [
    TFL_OperandHasAtleastRank<0, 2>,
    PredOpTrait<"input and result must have the same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    NoSideEffect]> {
  let summary = [{
    Returns a batched matrix tensor with new batched diagonal values.
  }];

  let description = [{
Given `input` and `diagonal`, this operation returns a tensor with the
same shape and values as `input`, except for the main diagonal of the
innermost matrices.  These will be overwritten by the values in `diagonal`.
  }];

  let arguments = (ins
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QI8, QI16, QUI8, TFL_Quint8]>:$input,
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QI8, QI16, QUI8, TFL_Quint8]>:$diagonal
  );

  let results = (outs
    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QI8, QI16, QUI8, TFL_Quint8]>:$result
  );

  let hasOptions = 0;
}

// These ops are named NonMaxSuppressionV4 & NonMaxSuppressionV5 to be
// consistent with TensorFlow's naming. They are NOT 'versions' of NMS in the
// sense that one is an incremental change over the other.
// In reality NonMaxSuppressionV5 implements Soft Non Max Suppression and
// NonMaxSuppressionV4 performs hard NMS.

def TFL_NonMaxSuppressionV4Op : TFL_Op<"non_max_suppression_v4", [
  NoSideEffect,
  // Operand 0 (boxes) should have rank 2 with the dim[1] == 4 (box corners)
  TFL_OperandHasRank<0, 2>,
  PredOpTrait<"boxes should have dim[1] == 4",
      TFL_OperandDimEquals<0, 1, 4>>,
  // Operand 1 (scores) should be a 1-dim tensor
  TFL_OperandHasRank<1, 1>,
  // Other operands are scalar params.
  TFL_OperandHasRank<2, 0>, TFL_OperandHasRank<3, 0>,
  TFL_OperandHasRank<4, 0>,
  NoQuantizableResult]> {
  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score,
  }];

  let description = [{
pruning away boxes that have high intersection-over-union (IOU) overlap
with previously selected boxes.  Bounding boxes with score less than
`score_threshold` are removed.  Bounding boxes are supplied as
[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
diagonal pair of box corners and the coordinates can be provided as normalized
(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
is agnostic to where the origin is in the coordinate system and more
generally is invariant to orthogonal transformations and translations
of the coordinate system; thus translations or reflections of the coordinate
system result in the same boxes being selected by the algorithm.
The output of this operation is a set of integers indexing into the input
collection of bounding boxes representing the selected boxes.  The bounding
box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:
  selected_indices = tf.image.non_max_suppression_v2(
      boxes, scores, max_output_size, iou_threshold, score_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
  }];

  let arguments = (ins
    TFL_FpTensor:$boxes,
    TFL_FpTensor:$scores,
    TFL_I32Tensor:$max_output_size,
    TFL_FpTensor:$iou_threshold,
    TFL_FpTensor:$score_threshold
  );

  let results = (outs
    TFL_I32Tensor:$selected_indices,
    TFL_I32Tensor:$valid_outputs
  );
}

def TFL_NonMaxSuppressionV5Op : TFL_Op<"non_max_suppression_v5", [
  NoSideEffect,
  // Operand 0 (boxes) should have rank 2 with the dim[1] == 4 (box corners)
  TFL_OperandHasRank<0, 2>,
  PredOpTrait<"boxes should have dim[1] == 4",
      TFL_OperandDimEquals<0, 1, 4>>,
  // Operand 1 (scores) should be a 1-dim tensor
  TFL_OperandHasRank<1, 1>,
  // Other operands are scalar params.
  TFL_OperandHasRank<2, 0>, TFL_OperandHasRank<3, 0>,
  TFL_OperandHasRank<4, 0>, TFL_OperandHasRank<5, 0>,
  NoQuantizableResult]> {
  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score,
  }];

  let description = [{
pruning away boxes that have high intersection-over-union (IOU) overlap
with previously selected boxes.  Bounding boxes with score less than
`score_threshold` are removed.  Bounding boxes are supplied as
[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
diagonal pair of box corners and the coordinates can be provided as normalized
(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
is agnostic to where the origin is in the coordinate system and more
generally is invariant to orthogonal transformations and translations
of the coordinate system; thus translations or reflections of the coordinate
system result in the same boxes being selected by the algorithm.
The output of this operation is a set of integers indexing into the input
collection of bounding boxes representing the selected boxes.  The bounding
box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:
  selected_indices = tf.image.non_max_suppression_v2(
      boxes, scores, max_output_size, iou_threshold, score_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f.
Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
of other overlapping boxes instead of directly causing them to be pruned.
To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be
larger than 0.
  }];

  let arguments = (ins
    TFL_FpTensor:$boxes,
    TFL_FpTensor:$scores,
    TFL_I32Tensor:$max_output_size,
    TFL_FpTensor:$iou_threshold,
    TFL_FpTensor:$score_threshold,
    TFL_FpTensor:$soft_nms_sigma
  );

  let results = (outs
    TFL_I32Tensor:$selected_indices,
    TFL_FpTensor:$selected_scores,
    TFL_I32Tensor:$valid_outputs
  );
}

def TFL_NotEqualOp : TFL_Op<"not_equal", [
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    ComparisonOpSameElementTypeConstraint,
    ResultsBroadcastableShape,
    Commutative,
    NoSideEffect,
    NoQuantizableResult]> {
  let summary = "Not_equal operator";

  let description = [{
    Element-wise not_equal operation.
  }];

  let arguments = (
      ins TFL_TensorOf<[I1, F32, I32, I64, QUI8, QI8, TFL_Quint8, TFL_Str]>:$lhs,
      TFL_TensorOf<[I1, F32, I32, I64, QUI8, QI8, TFL_Quint8, TFL_Str]>:$rhs);

  let results = (outs TFL_BoolTensor:$output);

  let builders =
  [
    OpBuilderDAG<(ins "Value":$lhs, "Value":$rhs),
    [{
        buildComparisonBinOp(&$_builder, $_state, lhs, rhs);
      }]>
  ];

  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];

  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
}

def TFL_DivOp : TFL_Op<"div", [
    // TODO(fengliuai): NoQuantizableResult is only correct for int8
    // quantization. Update to handle uint8 quantization.
    BinaryOpSameElementTypeConstraint,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 5>,
    ResultsBroadcastableShape,
    NoSideEffect,
    NoQuantizableResult]> {
  let summary = "Division operator";

  let description = [{
    Element-wise division operation.
  }];

  let arguments = (
      ins TFL_TensorOf<[F32, I32, QUI8]>:$lhs,
      TFL_TensorOf<[F32, I32, QUI8]>:$rhs,
      TFL_AFAttr:$fused_activation_function);

  let results = (outs TFL_TensorOf<[F32, I32, QUI8]>:$output);

  let builders = [TFL_FusedBroadcastableBinaryBuilder];

  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];

  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];

  let hasOptions = 1;

  let hasFolder = 1;
}

def TFL_EluOp: TFL_Op<"elu", [
    NoSideEffect,
    SameOperandsAndResultShape,
    SameOperandsAndResultType,
    NoQuantizableResult]> {
  let summary = "Exponential Linear Unit operator";
  let description = [{
    Computes the exponential linear unit
      f(x) = exp(x) - 1 for x < 0, f(x) = x for x >= 0
    element-wise.
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);

  let hasOptions = 0;
}

def TFL_EmbeddingLookupOp: TFL_Op<"embedding_lookup",
    [NoSideEffect,
     PredOpTrait<"value and output must have same element type",
       TFL_TCresVTEtIsSameAsOp<0, 1>>,
     TFL_OperandHasRank<0, 1>,
     TFL_OperandHasRankAtLeast<1, 2>
    ]> {
  let summary = "Embedding lookup operator";

  let description = [{
    Looks up ids in a list of embedding tensors.
  }];

  let arguments = (ins
    TFL_TensorOf<[I32]>:$lookup,
    TFL_TensorOf<[F32, I8, UI8]>:$value
   );

  let results = (outs TFL_TensorOf<[F32, I8, UI8]>:$output);
}

def TFL_EqualOp: TFL_Op<"equal", [
    Commutative,
    NoSideEffect,
    ResultsBroadcastableShape,
    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
    ComparisonOpSameElementTypeConstraint]> {
  let summary = "Equal operator";

  let description = [{
    Returns the truth value of x == y element-wise.
  }];

  let arguments = (
    ins
    TFL_TensorOf<[I1, F32, I32, I64, QI8, QUI8, UI8, TFL_Str]>:$x,
    TFL_TensorOf<[I1, F32, I32, I64, QI8, QUI8, UI8, TFL_Str]>:$y
  );

  let results = (outs TFL_BoolTensor:$output);

  let builders = [TFL_ComparisonBinaryBuilder];
}

def TFL_ExpOp: TFL_Op<"exp", [
    NoSideEffect,
    SameOperandsAndResultType,
    NoQuantizableResult]> {
  let summary = "Natural exponentiation operator";

  let description = [{
    Performs an element-wise natural exponentiation on the input.
  }];

  let arguments = (ins TFL_FpTensor:$x);

  let results = (outs TFL_FpTensor:$y);

  let hasOptions = 0b1;
}
1516
1517def TFL_ExpandDimsOp: TFL_Op<"expand_dims", [
1518    NoSideEffect,
1519    SameOperandsAndResultsScale,
1520    PredOpTrait<"input and output must have same element type",
1521      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
1522  let summary = "Inserts a dimension of 1 into a tensor's shape.";
1523
1524  let description = [{
1525Given a tensor `input`, this operation inserts a dimension of 1 at the
1526dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
1527zero; if you specify a negative number for `axis` it is counted backward from
1528the end.
1529
1530This operation is useful if you want to add a batch dimension to a single
1531element. For example, if you have a single image of shape `[height, width,
1532channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
1533which will make the shape `[1, height, width, channels]`.
1534
1535Other examples:
1536
1537```
1538# 't' is a tensor of shape [2]
1539shape(expand_dims(t, 0)) ==> [1, 2]
1540shape(expand_dims(t, 1)) ==> [2, 1]
1541shape(expand_dims(t, -1)) ==> [2, 1]
1542
1543# 't2' is a tensor of shape [2, 3, 5]
1544shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
1545shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
1546shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
1547```
1548
1549This operation requires that:
1550
1551`-1-input.dims() <= dim <= input.dims()`
1552
1553This operation is related to `squeeze()`, which removes dimensions of
1554size 1.
1555  }];
1556
1557  // TODO: Restriction on dim's size and valid range are not modeled here.
1558  let arguments = (ins AnyTensor:$input, TFL_I32OrI64Tensor:$dim);
1559
1560  let results = (outs AnyTensor:$output);
1561
1562  let hasOptions = 1;
1563}
1564
1565def TFL_SqueezeOp: TFL_Op<"squeeze", [NoSideEffect,
1566                                      SameOperandsAndResultsScale]> {
1567  let summary = "Removes dimensions of size 1 from the shape of a tensor.";
1568
1569  let description = [{
1570Given a tensor `input`, this operation returns a tensor of the same type with
1571all dimensions of size 1 removed. If you don't want to remove all size 1
1572dimensions, you can remove specific size 1 dimensions by specifying
1573`squeeze_dims`.
1574
1575For example:
1576
1577```
1578# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
1579shape(squeeze(t)) ==> [2, 3]
1580```
1581
1582Or, to remove specific size 1 dimensions:
1583
1584```
1585# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
1586shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
1587```
1588  }];
1589
1590  let arguments = (ins
1591    AnyTensor:$input,
1592    Confined<DefaultValuedAttr<I64ArrayAttr, "{}">, [TFL_ArrayMaxCount<8>]>:$squeeze_dims
1593  );
1594
1595  let results = (outs
1596    AnyTensor:$output
1597  );
1598
1599  let hasOptions = 1;
1600
1601  let customOption = "SqueezeOptions";
1602}
1603
1604def TFL_FillOp: TFL_Op<"fill", [
1605    NoSideEffect,
1606    PredOpTrait<"input and result must have same element type",
1607      TFL_TCresVTEtIsSameAsOp<0, 1>>]> {
1608  let summary = "Fill the tensor with the given value.";
1609  let description = [{
1610    Fills a tensor whose shape is given by `dims` with the given `input` value.
1611  }];
1612
1613  let arguments = (ins TFL_I32OrI64Tensor:$dims,
1614                   TFL_TensorOf<[F32, I32, I64, I1, QI8, QI16, TFL_Str]>:$input);
1615
1616  let results = (outs TFL_TensorOf<[F32, I32, I64, I1, QI8, QI16, TFL_Str]>:$result);
1617
1618  let hasOptions = 0;
1619}
1620
1621def TFL_FloorOp: TFL_Op<"floor", [
1622    NoSideEffect,
1623    SameOperandsAndResultShape,
1624    SameOperandsAndResultType,
1625    NoQuantizableResult]> {
1626  let summary = "Floor operator";
1627
1628  let description = [{
1629    Returns element-wise floor value of the input.
1630  }];
1631
1632  let arguments = (ins TFL_FpTensor:$x);
1633
1634  let results = (outs TFL_FpTensor:$y);
1635}
1636
1637def TFL_FloorDivOp : TFL_Op<"floor_div", [
1638    ResultsBroadcastableShape,
1639    NoSideEffect,
1640    BinaryOpSameElementTypeConstraint,
1641    PredOpTrait<"lhs and output must have same element type",
1642      TFL_TCresVTEtIsSameAsOp<0, 0>>,
1643    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
1644    NoQuantizableResult]> {
1645  let summary = "Floor div operator";
1646
1647  let description = [{
1648    Element-wise floor div operation.
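
    For example (an illustrative sketch; the quotient is assumed to round
    toward negative infinity, as in tf.math.floordiv):

    ```
    floor_div(7, 2)  ==> 3
    floor_div(-7, 2) ==> -4
    ```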
1649  }];
1650
1651  let arguments = (
1652    ins TFL_TensorOf<[F32, I32]>:$lhs, TFL_TensorOf<[F32, I32]>:$rhs);
1653
1654  let results = (outs TFL_TensorOf<[F32, I32]>:$output);
1655
1656  let builders = [TFL_BroadcastableBinaryBuilder];
1657
1658  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];
1659
1660  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
1661}
1662
1663def TFL_FloorModOp : TFL_Op<"floor_mod", [
1664    ResultsBroadcastableShape,
1665    NoSideEffect,
1666    BinaryOpSameElementTypeConstraint,
1667    PredOpTrait<"lhs and output must have same element type",
1668      TFL_TCresVTEtIsSameAsOp<0, 0>>,
1669    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
1670    NoQuantizableResult]> {
1671  let summary = "Division remainder";
1672
1673  let description = [{
1674    Element-wise division remainder operation.
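
    For example (an illustrative sketch; the result is assumed to take the
    sign of the divisor, as in tf.math.floormod):

    ```
    floor_mod(7, 3)  ==> 1
    floor_mod(-7, 3) ==> 2
    floor_mod(7, -3) ==> -2
    ```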
1675  }];
1676
1677  let arguments = (
1678    ins TFL_TensorOf<[I32, I64, F32]>:$lhs,
1679    TFL_TensorOf<[I32, I64, F32]>:$rhs);
1680
1681  let results = (outs TFL_TensorOf<[I32, I64, F32]>:$output);
1682
1683  let builders = [TFL_BroadcastableBinaryBuilder];
1684}
1685
1686def TFL_GreaterOp : TFL_Op<"greater", [
1687    ResultsBroadcastableShape,
1688    ComparisonOpSameElementTypeConstraint,
1689    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
1690    NoSideEffect]> {
1691  let summary = "Greater operator";
1692
1693  let description = [{
1694    Element-wise greater operation.
1695  }];
1696
1697  let arguments = (
1698    ins TFL_TensorOf<[F32, I32, I64, QUI8, QI8, TFL_Quint8]>:$lhs,
1699    TFL_TensorOf<[F32, I32, I64, QUI8, QI8, TFL_Quint8]>:$rhs);
1700
1701  let results = (outs TFL_BoolTensor:$output);
1702
1703  let builders = [TFL_ComparisonBinaryBuilder];
1704
1705  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];
1706
1707  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
1708}
1709
1710def TFL_HardSwishOp: TFL_Op<"hard_swish", [
1711    NoSideEffect,
1712    SameOperandsAndResultShape,
1713    PredOpTrait<"input and output must have same element type",
1714      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
1715  let summary = "Hardswish activation function.";
1716  let description = [{
1717    Computes the hard-swish activation function
1718      f(x) -> (x * relu6(x+3))/6
1719    element-wise.
1720  }];
1721
1722  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$input);
1723
1724  let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$output);
1725
1726  let hasOptions = 0;
1727}
1728
1729def TFL_L2NormalizationOp : TFL_Op<"l2_normalization", [NoSideEffect,
1730    FixedOutputRangeInterface]> {
1731  let summary = "L2 Normalize Operator";
1732
1733  let description = [{
1734    Normalizes the input to unit L2 norm along the last dimension, i.e. x / sqrt(sum(x^2)).
1735  }];
1736
1737  let arguments = (ins
1738    TFL_TensorOf<[F32, QUI8, QI8, QUI16, QI16, I8]>:$input,
1739    TFL_AFAttr:$fused_activation_function
1740  );
1741
1742  let results = (outs TFL_TensorOf<[F32, QUI8, QI8, QUI16, QI16, I8]>:$output);
1743
1744  let hasOptions = 1;
1745
1746  let customOption = "L2NormOptions";
1747
1748  let extraClassDeclaration = [{
1749  // FixedOutputRangeInterface:
1750  quant::UniformQuantizedType GetFixedOutputRange(
1751      bool is_signed, int bit_width) {
1752    auto result_type = output().getType();
1753    // central_value = min_value / 2 + (max_value - 1) / 2 + 1
1754    // zero_point = central_value
1755    // scale = 1. / (central_value - min_value)
1756    return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
1757        /*scale=*/1.0 / 128, /*zero_point=*/0);
1758  }
1759  }];
1760}
1761
1762def TFL_LeakyReluOp: TFL_Op<"leaky_relu", [
1763    SameOperandsAndResultShape,
1764    NoSideEffect,
1765    PredOpTrait<"input and output must have same element type",
1766      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
1767  let summary = "Leaky Relu operator";
1768
1769  let description = [{
1770    Element-wise Leaky ReLU operator
1771      x -> x >= 0 ? x : (alpha * x)
1772  }];
1773
1774  let arguments = (
1775    ins TFL_TensorOf<[F32, QUI8, QI8, TFL_Quint8, QI16]>:$input,
1776    // Slope of the activation function at x < 0.
1777    F32Attr:$alpha
1778  );
1779
1780  let results = (outs TFL_TensorOf<[F32, QUI8, QI8, TFL_Quint8, QI16]>:$output);
1781
1782  let hasOptions = 0b1;
1783}
1784
1785def TFL_LessOp : TFL_Op<"less", [
1786    ResultsBroadcastableShape,
1787    ComparisonOpSameElementTypeConstraint,
1788    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
1789    NoSideEffect]> {
1790  let summary = "Less operator";
1791
1792  let description = [{
1793    Element-wise less operation.
1794  }];
1795
1796  let arguments = (
1797    ins TFL_TensorOf<[F32, I32, I64, QUI8, QI8, TFL_Quint8]>:$lhs,
1798    TFL_TensorOf<[F32, I32, I64, QUI8, QI8, TFL_Quint8]>:$rhs);
1799
1800  let results = (outs TFL_BoolTensor:$output);
1801
1802  let builders = [TFL_ComparisonBinaryBuilder];
1803
1804  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];
1805
1806  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
1807}
1808
1809def TFL_LogicalAndOp : TFL_Op<"logical_and", [
1810    NoSideEffect,
1811    NoQuantizableResult]> {
1812  let summary = "Logical AND operator";
1813
1814  let description = [{
1815    Element-wise logical AND operation.
1816  }];
1817
1818  let arguments = (
1819    ins TFL_BoolTensor:$lhs,
1820    TFL_BoolTensor:$rhs);
1821
1822  let results = (outs TFL_BoolTensor:$output);
1823
1824  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];
1825
1826  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
1827}
1828
1829def TFL_LogicalNotOp : TFL_Op<"logical_not", [
1830    NoSideEffect,
1831    SameOperandsAndResultShape,
1832    NoQuantizableResult]> {
1833  let summary = "Logical NOT operator";
1834
1835  let description = [{
1836    Element-wise logical NOT operation.
1837  }];
1838
1839  let arguments = (ins TFL_BoolTensor:$lhs);
1840
1841  let results = (outs TFL_BoolTensor:$output);
1842}
1843
1844def TFL_LogicalOrOp : TFL_Op<"logical_or", [
1845    NoSideEffect,
1846    NoQuantizableResult]> {
1847  let summary = "Logical OR operator";
1848
1849  let description = [{
1850    Element-wise logical OR operation.
1851  }];
1852
1853  let arguments = (
1854    ins TFL_BoolTensor:$lhs,
1855    TFL_BoolTensor:$rhs);
1856
1857  let results = (outs TFL_BoolTensor:$output);
1858
1859  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];
1860
1861  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
1862}
1863
1864def TFL_LogisticOp: TFL_Op<"logistic", [
1865    NoSideEffect,
1866    PredOpTrait<"x and y must have same element type",
1867      TFL_TCresVTEtIsSameAsOp<0, 0>>,
1868    SameOperandsAndResultShape,
1869    FixedOutputRangeInterface]> {
1870  let summary = "Logistic operator";
1871
1872  let description = [{
1873    Computes element-wise sigmoid of input, i.e. 1 / (1 + exp(-x))
1874  }];
1875
1876  let arguments = (ins TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$x);
1877
1878  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$y);
1879
1880  let extraClassDeclaration = [{
1881  // FixedOutputRangeInterface:
1882  quant::UniformQuantizedType GetFixedOutputRange(
1883      bool is_signed, int bit_width) {
1884    auto result_type = y().getType();
1885    // zero_point = 0
1886    // scale = 1. / (max_value + 1)
1887    return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
1888        /*scale=*/1.0 / 256, /*zero_point=*/-128);
1889  }
1890  }];
1891}
1892
1893def TFL_LogOp: TFL_Op<"log", [
1894    NoSideEffect,
1895    SameOperandsAndResultShape,
1896    SameOperandsAndResultType,
1897    NoQuantizableResult]> {
1898  let summary = "Natural logarithm operator";
1899
1900  let description = [{
1901    Performs element-wise natural logarithm operation on input.
1902  }];
1903
1904  let arguments = (ins TFL_FpTensor:$x);
1905
1906  let results = (outs TFL_FpTensor:$y);
1907
1908  let hasFolder = 1;
1909}
1910
1911def TFL_LogSoftmaxOp : TFL_Op<"log_softmax", [
1912    NoSideEffect,
1913    SameOperandsAndResultShape,
1914    PredOpTrait<"x and y must have same element type",
1915      TFL_TCresVTEtIsSameAsOp<0, 0>>,
1916    FixedOutputRangeInterface]> {
1917  let summary = "Log softmax operator";
1918
1919  let description = [{
1920    Computes element-wise log softmax activations with the following formula
1921
1922      input - log(reduce_sum(exp(input), dim))
1923  }];
1924
1925  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8, TFL_Quint8]>:$input);
1926
1927  let results = (outs TFL_TensorOf<[F32, QUI8, QI8, TFL_Quint8]>:$output);
1928
1929  let hasOptions = 1;
1930
1931  let extraClassDeclaration = [{
1932  // FixedOutputRangeInterface:
1933  quant::UniformQuantizedType GetFixedOutputRange(
1934      bool is_signed, int bit_width) {
1935    auto result_type = output().getType();
1936    // zero_point = max_value
1937    // scale = -log_softmax_output_min / (max_value + 1)
1938    return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
1939        /*scale=*/16.0 / 256, /*zero_point=*/127);
1940  }
1941  }];
1942}
1943
1944// TODO(ashwinm): Revisit the granularity of the PredOpTraits. We could
1945// break this into smaller PredOpTraits, each with more descriptive messages
1946// that would make it easier to trace failures OR, need a way to specify desc
1947// per Predicate inside the trait and get tablegen to use that to emit error
1948// message.
1949def MaxPoolOperandAndResultConstraints : PredOpTrait<"MaxPool2D operand and "
1950    "result types match specified constraints",
1951  And<[
1952    // The input and output tensors should have the same elemental type
1953    // and they should be one of the specified types below.
1954    TCopVTEtIs<0, AnyTypeOf<[F32, QI8, QUI8]>>,
1955    TFL_TCresVTEtIsSameAsOp<0, 0>]>>;
1956
1957def TFL_MaxPool2DOp : TFL_Op<"max_pool_2d", [
1958    TFL_OperandHasRank<0, 4>,
1959    PredOpTrait<"input and output must have same element type",
1960      TFL_TCresVTEtIsSameAsOp<0, 0>>,
1961    NoSideEffect,
1962    MaxPoolOperandAndResultConstraints,
1963    SameOperandsAndResultsScale]> {
1964  let summary = "Max Pool 2D op";
1965
1966  let description = [{
1967    Performs max pool 2D on input.
1968
1969    Inputs:
1970      `inputs[0]`: required: the input tensor
1971  }];
1972
1973  let arguments = (
1974    ins TFL_TensorOf<[F32, QUI8, QI8, QI16, TFL_Quint8]>:$input,
1975    TFL_PaddingAttr:$padding,
1976    I32Attr:$stride_w,
1977    I32Attr:$stride_h,
1978    I32Attr:$filter_width,
1979    I32Attr:$filter_height,
1980    TFL_AFAttr:$fused_activation_function
1981  );
1982
1983  let results = (outs TFL_TensorOf<[F32, QUI8, QI8, QI16, TFL_Quint8]>:$output);
1984
1985  let hasOptions = 1;
1986
1987  let customOption = "Pool2DOptions";
1988}
1989
1990def TFL_MaximumOp : TFL_Op<"maximum", [
1991    ResultsBroadcastableShape,
1992    NoSideEffect,
1993    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 5>,
1994    Commutative,
1995    SameOperandsAndResultsScale]> {
1996  let summary = "Max operator";
1997  let description = [{
1998    Element-wise max operation.
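
    For example (illustrative values, assuming standard broadcasting):

    ```
    # 'lhs' is [[1, 8], [3, 4]]
    # 'rhs' is [2]
    maximum(lhs, rhs) ==> [[2, 8], [3, 4]]
    ```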
1999  }];
2000
2001  let arguments = (
2002    ins TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$lhs,
2003    TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$rhs
2004  );
2005
2006  let results = (outs
2007    TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$max
2008  );
2009
2010  let builders = [TFL_BroadcastableBinaryBuilder];
2011
2012  let hasOptions = 0;
2013}
2014
2015def TFL_MeanOp : TFL_Op<"mean", [
2016    PredOpTrait<"input and output must have same element type",
2017      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2018    NoSideEffect]> {
2019  let summary = "Mean operator";
2020
2021  let description = [{
2022    Computes the mean of elements across dimensions of a tensor.
2023    Reduces `input` along the dimensions given in `axis`.
2024    Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for
2025    each entry in `axis`. If `keep_dims` is true, the reduced dimensions are
2026    retained with length 1.
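
    For example (illustrative values):

    ```
    # 'x' is [[1., 1.], [2., 2.]]
    mean(x, axis=[0], keep_dims=false) ==> [1.5, 1.5]
    mean(x, axis=[1], keep_dims=false) ==> [1., 2.]
    ```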
2027  }];
2028
2029  let arguments = (ins
2030    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, UI8, QI16]>:$input,
2031    TFL_TensorOf<[I32, I64]>:$axis,
2032    BoolAttr:$keep_dims
2033  );
2034
2035  let results = (outs
2036    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, UI8, QI16]>:$output);
2037
2038  let hasOptions = 1;
2039  let customOption = "ReducerOptions";
2040}
2041
2042def TFL_OneHotOp : TFL_Op<"one_hot", [NoSideEffect]> {
2043  let summary = "OneHot operator";
2044
2045  let description = [{
2046    Returns a one-hot tensor. The locations represented by indices in `indices`
2047    take value `on_value`, while all other locations take value `off_value`.
2048
2049    If the input `indices` is rank `N`, the output will have rank `N+1`.
2050    The new axis is created at dimension `axis` (default: the new axis is
2051    appended at the end).
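
    For example (illustrative values; indices outside [0, depth) produce rows
    of `off_value`):

    ```
    # 'indices' is [0, 2, -1], 'depth' is 3
    # 'on_value' is 5, 'off_value' is 0, 'axis' is -1
    one_hot(indices, depth, on_value, off_value, axis) ==>
      [[5, 0, 0],
       [0, 0, 5],
       [0, 0, 0]]
    ```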
2052  }];
2053
2054  let arguments = (ins
2055    TFL_TensorOf<[I32, I64]>:$indices,
2056    TFL_I32Tensor:$depth,
2057    TFL_TensorOf<[F32, I32, I64, I1, I8, UI8]>:$on_value,
2058    TFL_TensorOf<[F32, I32, I64, I1, I8, UI8]>:$off_value,
2059
2060    I32Attr:$axis
2061  );
2062
2063  let results = (outs
2064    TFL_TensorOf<[F32, I32, I64, I1, I8, UI8]>:$output
2065  );
2066
2067  let hasOptions = 1;
2068}
2069
2070def TFL_RoundOp: TFL_Op<"round", [
2071    NoSideEffect,
2072    SameOperandsAndResultShape,
2073    SameOperandsAndResultType,
2074    NoQuantizableResult]> {
2075  let summary = "Round operator";
2076
2077  let description = [{
2078Rounds the values of a tensor to the nearest integer, element-wise.
2079  }];
2080
2081  let arguments = (ins
2082    TFL_FpTensor:$x
2083  );
2084
2085  let results = (outs
2086    TFL_FpTensor:$y
2087  );
2088}
2089
2090def TFL_SliceOp : TFL_Op<"slice", [
2091    PredOpTrait<"input and output must have same element type",
2092      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2093    NoSideEffect,
2094    SameOperandsAndResultsScale,
2095    TFL_OperandHasRankAtMost<0, 5>,
2096    TFL_OperandHasRankAtMost<1, 1>,
2097    TFL_OperandHasRankAtMost<2, 1>]> {
2098  let summary = "Return a slice from 'input'.";
2099
2100  let description = [{
2101The output tensor is a tensor with dimensions described by 'size'
2102whose values are extracted from 'input' starting at the offsets in
2103'begin'.
2104
2105`begin` is zero-based; `size` is one-based. If size[i] is -1, all remaining
2106elements in dimension i are included in the slice. In other words, this is
2107equivalent to setting:
2108  size[i] = input.dim_size(i) - begin[i]
2109
2110*Requirements*:
2111  0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
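
For example (illustrative values):

```
# 'input' is [[1, 1, 1], [2, 2, 2], [3, 3, 3]]
slice(input, begin=[1, 0], size=[1, 3])  ==> [[2, 2, 2]]
slice(input, begin=[1, 0], size=[2, -1]) ==> [[2, 2, 2], [3, 3, 3]]
```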
2112  }];
2113
2114  let arguments = (ins
2115    TFL_TensorOf<[F32, I32, I64, I8, UI8, I1, TFL_Str, QI8, QUI8, TFL_Quint8, QI16]>:$input,
2116    TFL_I32OrI64Tensor:$begin,
2117    TFL_I32OrI64Tensor:$size
2118  );
2119
2120  let results = (outs
2121    TFL_TensorOf<[F32, I32, I64, I8, UI8, I1, TFL_Str, QI8, QUI8, TFL_Quint8, QI16]>:$output
2122  );
2123
2124  let verifier = [{ return Verify(*this); }];
2125
2126  let hasCanonicalizer = 1;
2127}
2128
2129def TFL_SumOp: TFL_Op<"sum", [
2130    PredOpTrait<"input and output must have same element type",
2131      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2132    NoSideEffect]> {
2133
2134  let summary = "Sum operator";
2135
2136  let description = [{
2137    Computes the sum reduction along the specified axes
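
    For example (an illustrative sketch of the `keep_dims` behavior):

    ```
    # 'x' is [[1, 2], [3, 4]]
    sum(x, axes=[1], keep_dims=false) ==> [3, 7]
    sum(x, axes=[1], keep_dims=true)  ==> [[3], [7]]
    ```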
2138  }];
2139
2140  let arguments = (ins
2141    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$input,
2142    TFL_I32Tensor:$axes,
2143    BoolAttr:$keep_dims
2144  );
2145
2146  let results = (outs TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$output);
2147
2148  let hasOptions = 1;
2149  let customOption = "ReducerOptions";
2150}
2151
2152def TFL_ReduceMinOp: TFL_Op<"reduce_min", [
2153    PredOpTrait<"input and output must have same element type",
2154      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2155    NoSideEffect,
2156    SameOperandsAndResultsScale]> {
2157  let summary = "Min-reduction operator";
2158
2159  let description = [{
2160    Computes the min reduction along the specified axes
2161  }];
2162
2163  let arguments = (ins
2164    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$input,
2165    TFL_I32Tensor:$axes,
2166    BoolAttr:$keep_dims
2167  );
2168
2169  let results = (outs
2170    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$output);
2171
2172  let hasOptions = 1;
2173  let customOption = "ReducerOptions";
2174}
2175
2176def TFL_ReduceMaxOp: TFL_Op<"reduce_max", [
2177    PredOpTrait<"input and output must have same element type",
2178      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2179    NoSideEffect,
2180    SameOperandsAndResultsScale]> {
2181  let summary = "Max-reduction operator";
2182
2183  let description = [{
2184    Computes the max reduction along the specified axes
2185  }];
2186
2187  let arguments = (ins
2188    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$input,
2189    TFL_I32Tensor:$axes,
2190    BoolAttr:$keep_dims
2191  );
2192
2193  let results = (outs
2194    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$output);
2195
2196  let hasOptions = 1;
2197  let customOption = "ReducerOptions";
2198}
2199
2200def TFL_ReduceProdOp: TFL_Op<"reduce_prod", [
2201    PredOpTrait<"input and output must have same element type",
2202      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2203    NoSideEffect]> {
2204  let summary = "Prod-reduction operator";
2205
2206  let description = [{
2207    Computes the product along the specified axes
2208  }];
2209
2210  let arguments = (ins
2211    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$input,
2212    TFL_I32Tensor:$axes,
2213    BoolAttr:$keep_dims
2214  );
2215
2216  let results = (outs
2217    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$output);
2218
2219  let hasOptions = 1;
2220  let customOption = "ReducerOptions";
2221}
2222
2223def TFL_MinimumOp : TFL_Op<"minimum", [
2224    ResultsBroadcastableShape,
2225    NoSideEffect,
2226    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 5>,
2227    Commutative,
2228    SameOperandsAndResultsScale]> {
2229  let summary = "Min operator";
2230  let description = [{
2231    Element-wise min operation.
2232  }];
2233
2234  let arguments = (
2235    ins TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$lhs,
2236    TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$rhs
2237  );
2238
2239  let results = (outs
2240    TFL_TensorOf<[F32, TFL_Int32Or64, QI8, QUI8, QI16]>:$min
2241  );
2242
2243  let builders = [TFL_BroadcastableBinaryBuilder];
2244
2245  let hasOptions = 0;
2246}
2247
2248def TFL_MulOp : TFL_Op<"mul", [
2249    ResultsBroadcastableShape,
2250    NoSideEffect,
2251    Commutative,
2252    BinaryOpSameElementTypeConstraint,
2253    TFL_RuntimePredOpTrait<"Operands do not have valid shapes",
2254      CPred<"TFL::VerifyMulOpShapeConstraints(llvm::cast<MulOp>($_op))">>]> {
2255  let summary = "Multiplication operator";
2256
2257  let description = [{
2258    Element-wise multiplication operation.
2259  }];
2260
2261  let arguments = (
2262    ins TFL_TensorOf<[F32, I32, QI8, QUI8, QI16]>:$lhs,
2263    TFL_TensorOf<[F32, I32, QI8, QUI8, QI16]>:$rhs,
2264    TFL_AFAttr:$fused_activation_function);
2265
2266  let results = (outs TFL_TensorOf<[F32, I32, QI8, QUI8, QI16]>:$output);
2267
2268  let hasFolder = 1;
2269
2270  let builders = [TFL_FusedBroadcastableBinaryBuilder];
2271
2272  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];
2273
2274  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
2275
2276  let hasOptions = 1;
2277}
2278
2279def TFL_NegOp: TFL_Op<"neg", [
2280    NoSideEffect,
2281    SameOperandsAndResultShape,
2282    SameOperandsAndResultType,
2283    NoQuantizableResult]> {
2284  let summary = "Negation operator";
2285
2286  let description = [{
2287    Computes element-wise negation of input
2288  }];
2289
2290  let arguments = (ins TFL_TensorOf<[F32, I32, I64]>:$x);
2291
2292  let results = (outs TFL_TensorOf<[F32, I32, I64]>:$y);
2293
2294  let hasOptions = 0b1;
2295
2296  let hasFolder = 1;
2297}
2298
2299def TFL_PackOp : TFL_Op<"pack", [
2300    TFL_SameFirstOperandAndFirstResultElementType,
2301    NoSideEffect,
2302    SameOperandsAndResultsScale]> {
2303  let summary = "Packs a list of tensors along a dimension into one tensor";
2304
2305  let description = [{
2306    Packs a list of `values_count` rank-`R` tensors into one rank-`(R+1)`
2307    tensor.
2308
2309    Packs the `values_count` tensors in `values` into a tensor with rank one
2310    higher than each tensor in `values`, by packing them along the `axis`
2311    dimension.
2312
2313    Given a list of `N` tensors of shape `(A, B, C)`;
2314
2315    if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
2316    if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
2317    Etc.
2318
2319    For example:
2320
2321    ```
2322    # 'x' is [1, 4]
2323    # 'y' is [2, 5]
2324    # 'z' is [3, 6]
2325    pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
2326    pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
2327    ```
2328
2329    This is the opposite of `unpack`.
2330  }];
2331
2332  let arguments = (ins
2333    TFL_VariadicTensorOf<[F32, I8, I16, I32, I64, UI8, QI8, QUI8, QI16, TFL_Quint8]>:$values,
2334
2335    Confined<I32Attr, [IntPositive]>:$values_count,
2336    I32Attr:$axis
2337  );
2338
2339  let results = (outs
2340    TFL_TensorOf<[F32, I8, I16, I32, I64, UI8, QI8, QUI8, QI16, TFL_Quint8]>:$output
2341  );
2342
2343  let verifier = [{ return Verify(*this); }];
2344
2345  let hasCanonicalizer = 1;
2346
2347  let hasOptions = 1;
2348}
2349
2350def TFL_PadOp : TFL_Op<"pad", [
2351    PredOpTrait<"input and output must have same element type",
2352      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2353    NoSideEffect,
2354    SameOperandsAndResultsScale,
2355    TFL_OperandHasRankAtMost<0, 4>,
2356    TFL_OperandHasRank<1, 2>,
2357    TFL_OperandRankEquals1DimOfOperand<0, 1>,
2358    PredOpTrait<"the first dim size of the padding argument must be at most 4",
2359      Or<[TFL_OperandIsUnrankedPred<1>,
2360          TFL_OperandDimIsAtMost<1, 0, 4>]>>]> {
2361  let summary = "Padding operator";
2362
2363  let description = [{
2364    This operation pads `input` with zeros according to the `paddings` you
2365    specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
2366    the rank of `input`. For each dimension D of `input`, `paddings[D, 0]`
2367    indicates how many zeros to add before the contents of `input` in that
2368    dimension, and `paddings[D, 1]` indicates how many zeros to add after the
2369    contents of `input` in that dimension.
2370
2371    The padded size of each dimension D of the output is:
2372
2373      `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
2374
2375    For example:
2376
2377    ```
2378    # 't' is [[1, 1], [2, 2]]
2379    # 'paddings' is [[1, 1], [2, 2]]
2380    # rank of 't' is 2
2381    pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
2382                          [0, 0, 1, 1, 0, 0]
2383                          [0, 0, 2, 2, 0, 0]
2384                          [0, 0, 0, 0, 0, 0]]
2385    ```
2386  }];
2387
2388  let arguments = (ins TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$input,
2389    TFL_I32OrI64Tensor:$padding);
2390
2391  let results = (outs TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8, QI16]>:$output);
2392
2393  let hasOptions = 1;
2394}
2395
2396def TFL_PadV2Op : TFL_Op<"padv2", [
2397    PredOpTrait<"input and output must have same element type",
2398      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2399    NoSideEffect,
2400    SameOperandsAndResultsScale,
2401    TFL_OperandHasRankAtMost<0, 4>,
2402    TFL_OperandHasRank<1, 2>,
2403    TFL_OperandHasRank<2, 0>,
2404    TFL_OperandRankEquals1DimOfOperand<0, 1>,
2405    PredOpTrait<"the first dim size of the padding argument must be at most 4",
2406      Or<[TFL_OperandIsUnrankedPred<1>,
2407          TFL_OperandDimIsAtMost<1, 0, 4>]>>,
2408    PredOpTrait<"input and constant value operands must have same element type",
2409      TFL_TCopVTEtAreSameAt<0, 2>>]> {
2410  let summary = "Padding operator v2";
2411
2412  let description = [{
2413    This operation pads `input` according to the `paddings` and
2414    `constant_values` you specify. `paddings` is an integer tensor with shape
2415    `[Dn, 2]`, where n is the rank of `input`. For each dimension D of `input`,
2416    `paddings[D, 0]` indicates how many zeros to add before the contents of
2417    `input` in that dimension, and `paddings[D, 1]` indicates how many zeros to
2418    add after the contents of `input` in that dimension. `constant_values` is a
2419    scalar tensor of the same type as `input` that indicates the value to use
2420    for padding `input`.
2421
2422    The padded size of each dimension D of the output is:
2423
2424      `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
2425
2426    For example:
2427
2428    ```
2429    # 't' is [[1, 1], [2, 2]]
2430    # 'paddings' is [[1, 1], [2, 2]]
2431    # rank of 't' is 2
2432    pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
2433                          [0, 0, 1, 1, 0, 0]
2434                          [0, 0, 2, 2, 0, 0]
2435                          [0, 0, 0, 0, 0, 0]]
2436    ```
2437  }];
2438
2439  let arguments = (
2440    ins TFL_TensorOf<[F32, I32, I64, UI8, QI8, QUI8, TFL_Quint8]>:$input,
2441    TFL_I32OrI64Tensor:$padding,
2442    TFL_TensorOf<[F32, I32, I64, UI8, QI8, QUI8, TFL_Quint8]>:$constant_values);
2443
2444  let results = (outs TFL_TensorOf<[F32, I32, I64, UI8, QI8, QUI8, TFL_Quint8]>:$output);
2445
2446  let hasOptions = 1;
2447}
2448
2449def TFL_PowOp : TFL_Op<"pow", [
2450    ResultsBroadcastableShape,
2451    NoSideEffect,
2452    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
2453    NoQuantizableResult]> {
2454  let summary = "Power operator";
2455
2456  let description = [{
2457    Element-wise power operation.
2458  }];
2459
2460  let arguments = (
2461    ins TFL_TensorOf<[F32, I32]>:$lhs,
2462    TFL_TensorOf<[F32, I32]>:$rhs);
2463
2464  let results = (outs TFL_TensorOf<[F32, I32]>:$output);
2465
2466  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];
2467
2468  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
2469
2470  let builders = [TFL_BroadcastableBinaryBuilder];
2471}
2472
2473def TFL_PReluOp : TFL_Op<"prelu", [
2474    NoSideEffect,
2475    ResultsBroadcastableShape,
2476    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
2477    BinaryOpSameElementTypeConstraint,
2478    PredOpTrait<"input and output must have the same element type",
2479      TFL_TCresVTEtIsSameAsOp<0, 0>>, AffineQuantizedOpInterface]> {
2480  let summary = "Parameterized Relu operator";
2481
2482  let description = [{
2483    Parameterized Relu operator
2484      x -> x >= 0 ? x : (alpha * x)
2485    where alpha is a trainable tensor.
2486    `alpha` should have the same shape as `input` or be broadcastable to it.
2487  }];
2488
2489  let arguments = (
2490    ins TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8]>:$input,
2491    TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8]>:$alpha
2492  );
2493
2494  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8]>:$output);
2495
2496  let verifier = [{ return Verify(*this); }];
2497
2498  let extraClassDeclaration = [{
2499    // AffineQuantizedOpInterface:
2500    int GetChannelDimIndex() { return 0; }
2501    int GetQuantizationDimIndex() { return -1; }
2502  }];
2503}
2504
2505def TFL_RankOp: TFL_Op<"rank", [NoSideEffect]> {
2506  let summary = "Rank operator.";
2507  let description = [{
2508    Returns the rank of a tensor.
2509  }];
2510
2511  let arguments = (ins AnyTensor:$input);
2512
2513  let results = (outs TFL_IntTensor:$output);
2514
2515  let hasFolder = 1;
2516}
2517
2518def TFL_ReluOp: TFL_Op<"relu", [
2519    PredOpTrait<"x and y must have same element type",
2520      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2521    NoSideEffect,
2522    SameOperandsAndResultShape]> {
2523  let summary = "Relu operator";
2524
2525  let description = [{
2526    Element-wise Relu operator
2527      x -> max(0, x)
2528  }];
2529
2530  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8, QI16]>:$x);
2531
2532  let results = (outs TFL_TensorOf<[F32, QUI8, QI8, QI16]>:$y);
2533
2534  // This builder doesn't work with quantized type, so it can only be used by
2535  // non-quantization tablegen patterns. Currently, it is used by the
2536  // elementwise-move reordering pattern in the optimize_patterns.td
2537  let builders = [
2538    OpBuilderDAG<(ins "Value":$input),
2539    [{
2540      $_state.addOperands({input});
2541      $_state.addTypes(input.getType());
2542    }]>
2543  ];
2544}
2545
2546def TFL_Relu6Op: TFL_Op<"relu6", [
2547    PredOpTrait<"x and y must have same element type",
2548      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2549    NoSideEffect,
2550    SameOperandsAndResultShape]> {
2551  let summary = "Relu6 operator";
2552
2553  let description = [{
2554    Element-wise Relu6 operator
2555      x -> max(0, min(6, x))
2556  }];
2557
2558  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$x);
2559
2560  let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$y);
2561
2562  // This builder doesn't work with quantized type, so it can only be used by
2563  // non-quantization tablegen patterns. Currently, it is used by the
2564  // elementwise-move reordering pattern in the optimize_patterns.td
2565  let builders = [
2566    OpBuilderDAG<(ins "Value":$input),
2567    [{
2568      $_state.addOperands({input});
2569      $_state.addTypes(input.getType());
2570    }]>
2571  ];
2572}
2573
2574def TFL_Relu1Op: TFL_Op<"relu_n1_to_1", [
2575    PredOpTrait<"x and y must have same element type",
2576      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2577    NoSideEffect,
2578    SameOperandsAndResultShape]> {
2579  let summary = "Relu1 operator";
2580
2581  let description = [{
2582    Element-wise Relu1 operator
2583      x -> max(-1, min(1, x))
2584  }];
2585
2586  let arguments = (ins TFL_TensorOf<[F32, QUI8, QI8]>:$x);
2587
2588  let results = (outs TFL_TensorOf<[F32, QUI8, QI8]>:$y);
2589
2590  // This builder doesn't work with quantized type, so it can only be used by
2591  // non-quantization tablegen patterns. Currently, it is used by the
2592  // elementwise-move reordering pattern in the optimize_patterns.td
2593  let builders = [
2594    OpBuilderDAG<(ins "Value":$input),
2595    [{
2596      $_state.addOperands({input});
2597      $_state.addTypes(input.getType());
2598    }]>
2599  ];
2600}
2601
2602def TFL_ReshapeOp: TFL_Op<"reshape", [
2603    NoSideEffect, SameOperandsAndResultsScale]> {
2604  let summary = "Reshape operator";
2605
2606  let description = [{
2607    Produces a tensor with the same values but different static shape defined
2608    by the output type.
2609  }];
2610
2611  let arguments = (
2612    ins AnyTensor:$input,
2613    TFL_I32Tensor:$shape);
2614
2615  let results = (outs AnyTensor:$output);
2616  let hasCanonicalizer = 0b1;
2617  let hasFolder = 1;
2618
2619  let verifier = [{ return Verify(*this); }];
2620}
2621
2622def TFL_ReverseSequenceOp : TFL_Op<"reverse_sequence", [
2623    PredOpTrait<"input and output must have same element type",
2624      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2625    NoSideEffect,
2626    TFL_OperandHasRank<1, 1>]> {
2627  let summary = "Reverses variable length slices.";
2628
2629  let description = [{
2630This op first slices `input` along the dimension `batch_dim`, and for each
2631slice `i`, reverses the first `seq_lengths[i]` elements along
2632the dimension `seq_dim`.
2633
2634The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
2635and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
2636
2637The output slice `i` along dimension `batch_dim` is then given by input
2638slice `i`, with the first `seq_lengths[i]` slices along dimension
2639`seq_dim` reversed.
2640  }];
2641
2642  let arguments = (ins
2643    TFL_TensorOf<[F32, I32, I64, QI16, QUI8, TFL_Quint8]>:$input,
2644    TFL_I32OrI64Tensor:$seq_lengths,
2645
2646    Confined<I32Attr, [IntNonNegative]>:$seq_dim,
2647    Confined<I32Attr, [IntNonNegative]>:$batch_dim
2648  );
2649
2650  let results = (outs
2651    TFL_TensorOf<[F32, I32, I64, QI16, QUI8, TFL_Quint8]>:$output
2652  );
2653
2654  let hasOptions = 1;
2655}
2656
2657def TFL_RsqrtOp: TFL_Op<"rsqrt", [NoSideEffect,
2658                                  TFL_SameFirstOperandAndFirstResultElementType,
2659                                  SameOperandsAndResultShape]> {
2660  let summary = "Reciprocal of square root operator";
2661
2662  let description = [{
2663    Computes element-wise reciprocal square root of input, i.e. 1 / sqrt(x)
2664  }];
2665
2666  let arguments = (ins TFL_TensorOf<[F32, QI8, QI16]>:$x);
2667
2668  let results = (outs TFL_TensorOf<[F32, QI8, QI16]>:$y);
2669
2670  let hasFolder = 1;
2671}
2672
2673def TFL_ShapeOp: TFL_Op<"shape", [NoSideEffect]> {
2674  let summary = "Shape operator";
2675
2676  let description = [{
2677    Returns the shape of a tensor.
2678  }];
2679
2680  let arguments = (ins AnyTensor:$input);
2681
2682  let results = (outs TFL_TensorOf<[I32, I64]>:$output);
2683
2684  DerivedTypeAttr out_type = DerivedTypeAttr<[{
2685    return getResult().getType().cast<TensorType>().getElementType();
2686  }]>;
2687
2688  let hasOptions = 1;
2689}
2690
2691def TFL_RangeOp: TFL_Op<"range", [
2692    NoSideEffect,
2693    TFL_OperandHasRank<0, 0>,
2694    TFL_OperandHasRank<1, 0>,
2695    TFL_OperandHasRank<2, 0>,
2696    PredOpTrait<"operands and output must have same element type",
2697      And<[TCresVTEtIsSameAsOp<0, 0>, TCresVTEtIsSameAsOp<0, 1>,
2698           TCresVTEtIsSameAsOp<0, 2>]>>,
2699    NoQuantizableResult]> {
2700  let summary = "Range operator";
2701
2702  let description = [{
2703    Returns a 1D tensor defined by a sequence from `start` to `limit` with
2704    a given `delta`.
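
    For example (illustrative values):

    ```
    # 'start' is 3, 'limit' is 18, 'delta' is 3
    range(start, limit, delta) ==> [3, 6, 9, 12, 15]
    ```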
2705  }];
2706
2707  let arguments = (ins
2708    TFL_TensorOf<[I32, F32]>:$start,
2709    TFL_TensorOf<[I32, F32]>:$limit,
2710    TFL_TensorOf<[I32, F32]>:$delta);
2711
2712  let results = (outs TFL_TensorOf<[I32, F32]>:$result);
2713
2714  let hasFolder = 1;
2715}
2716
2717def TFL_ReverseV2Op: TFL_Op<"reverse_v2", [
2718    PredOpTrait<"input and output must have same element type",
2719      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2720    NoSideEffect,
2721    TFL_OperandHasRank<1, 1>]> {
2722  let summary = "ReverseV2 Operator";
2723
2724  let description = [{
2725    Reverses specific dimensions of a tensor.
2726
2727    Given a tensor and an int32/int64 tensor `axis` representing the set
2728    of dimensions of the tensor to reverse,
2729    this operation reverses each dimension `i` for
2730    which there exists `j` such that `axis[j] == i`.
2731
2732    Args:
2733      tensor: A Tensor. Must be one of the following types:
2734      uint8, int8, int16, int32, int64, float32, bool. Up to 8-D.
2735
2736      axis: A Tensor. Must be one of the following types: int32, int64,
2737      with only 1 element, which is the axis index.
2738      TODO: Add support for multiple elements.
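
    For example (illustrative values):

    ```
    # 't' is [[1, 2, 3], [4, 5, 6]]
    reverse_v2(t, axis=[1]) ==> [[3, 2, 1], [6, 5, 4]]
    ```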
2739  }];
2740
2741  let arguments = (
2742    ins
2743    TFL_TensorOf<[F32, UI8, I16, I32, I64, QI16, QUI8, QI8, TFL_Quint8, I1]>:$input,
2744    TFL_I32Tensor:$axis
2745  );
2746
2747  let results = (outs
2748    TFL_TensorOf<[F32, UI8, I16, I32, I64, QI16, QUI8, QI8, TFL_Quint8, I1]>:$output);
2749}
2750
2751// Select has many instances in TF models where one or more of its operands
2752// are unranked. Therefore, we skip adding shape constraints here.
2753def TFL_SelectOp : TFL_Op<"select", [
2754  NoSideEffect,
2755  SameOperandsAndResultsScale,
2756  PredOpTrait<"operands have same element type", TCopVTEtIsSameAs<1, 2>>,
2757  PredOpTrait<"operands and result have same element type",
2758    TFL_TCresVTEtIsSameAsOp<0, 1>>]> {
2759  let summary = "Select operator";
2760
2761  let description = [{
2762    Selects values of 'x' where the corresponding value of 'condition' is true,
2763    and values of 'y' where it is false. There are two valid condition input sizes:
2764
2765    1. Either the same shape as 'x' (in which case the select is elementwise), or
2766    2. 'condition' is rank 1 and matches 'x' over the first dimension (see below).
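
    For example (an illustrative sketch; here 'condition' is rank 1, so it
    selects whole rows of 'x' or 'y'):

    ```
    # 'condition' is [true, false]
    # 'x' is [[1, 2], [3, 4]]
    # 'y' is [[5, 6], [7, 8]]
    select(condition, x, y) ==> [[1, 2], [7, 8]]
    ```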
2767  }];
2768
2769  let arguments = (ins
2770    TFL_BoolTensor:$condition,
2771    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$x,
2772    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$y);
2773
2774  let results = (outs
2775    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$output);
2776
2777  // TODO(jpienaar): autogenerate this.
2778  let builders = [
2779    OpBuilderDAG<(ins "Value":$condition, "Value":$x, "Value":$y),
2780    [{
2781    auto resultType = x.getType();
2782    $_state.addOperands({condition, x, y});
2783    $_state.types.push_back(resultType);
2784  }]>];
2785
2786  let hasOptions = 1;
2787}
2788
2789def TFL_SelectV2Op : TFL_Op<"select_v2", [
2790    ResultsBroadcastableShape,
2791    NoSideEffect,
2792    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1, 2], 4>,
2793    PredOpTrait<"operands have same element type", TCopVTEtIsSameAs<1, 2>>,
2794    PredOpTrait<"operands and result have same element type",
2795      TFL_TCresVTEtIsSameAsOp<0, 1>>]> {
2796  let summary = "SelectV2 operator";
2797
2798  let description = [{
2799    Selects values of 'x' where the corresponding value of 'condition' is true,
2800    and values of 'y' where it is false. There are two valid condition input sizes:
2801
2802    1. Either the same shape as 'x' (in which case the select is elementwise), or
2803    2. Broadcastable shapes between 'condition', 'x' and 'y'.
2804  }];
2805
2806  let arguments = (ins
2807    TFL_BoolTensor:$condition,
2808    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$x,
2809    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$y);
2810
2811  let results = (outs
2812    TFL_TensorOf<[F32, I1, I8, I16, I32, I64, QI8, QUI8, QI16, TFL_Quint8]>:$output);
2813
2814  let builders = [
2815    OpBuilderDAG<(ins "Value":$cond, "Value":$x, "Value":$y),
2816    [{
2817    BuildSelectV2Op(&$_builder, $_state, cond, x, y);
2818  }]>];
2819
2820  let hasOptions = 1;
2821}
2822
2823def TFL_SinOp: TFL_Op<"sin", [
2824    NoSideEffect,
2825    SameOperandsAndResultShape,
2826    SameOperandsAndResultType,
2827    NoQuantizableResult]> {
2828  let summary = "Sine operator";
2829
2830  let description = [{
2831    Computes element-wise Sine of input
2832  }];
2833
2834  let arguments = (ins TFL_FpTensor:$x);
2835
2836  let results = (outs TFL_FpTensor:$y);
2837
2838  let hasFolder = 1;
2839}
2840
2841def TFL_SoftmaxOp : TFL_Op<"softmax", [
2842    NoSideEffect,
2843    PredOpTrait<"input and output must have same element type",
2844      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2845    TFL_OperandHasRankRange<0, 1, 4>,
2846    SameOperandsAndResultShape,
2847    FixedOutputRangeInterface]> {
2848  let summary = "Softmax operator";
2849
2850  let description = [{
2851    Computes element-wise softmax activations with the following formula
2852
2853      exp(input * beta) / tf.reduce_sum(exp(input * beta), dim)
2854  }];
2855
2856  let arguments = (
2857    ins TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8, QI16]>:$input,
2858    F32Attr:$beta
2859  );
2860
2861  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, TFL_Quint8, QI16]>:$output);
2862
2863  let hasOptions = 1;
2864
2865  let extraClassDeclaration = [{
2866  // FixedOutputRangeInterface:
2867  quant::UniformQuantizedType GetFixedOutputRange(
2868      bool is_signed, int bit_width) {
2869    auto result_type = output().getType();
2870    // zero_point = 0
2871    // scale = 1. / (max_value + 1)
2872    return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
2873        /*scale=*/1.0 / 256, /*zero_point=*/-128);
2874  }
2875  }];
2876}
2877
2878def TFL_SqrtOp: TFL_Op<"sqrt", [
2879    NoSideEffect,
2880    SameOperandsAndResultShape,
2881    SameOperandsAndResultType,
2882    NoQuantizableResult]> {
2883  let summary = "Square root operator";
2884
2885  let description = [{
2886    Computes element-wise Square root of input
2887  }];
2888
2889  let arguments = (ins TFL_FpTensor:$x);
2890
2891  let results = (outs TFL_FpTensor:$y);
2892
2893  let hasFolder = 1;
2894}
2895
2896def TFL_SquareOp: TFL_Op<"square", [
2897    NoSideEffect,
2898    SameOperandsAndResultShape,
2899    SameOperandsAndResultType,
2900    NoQuantizableResult]> {
2901  let summary = "Square operator";
2902
2903  let description = [{
2904    Computes element-wise Square of input
2905  }];
2906
2907  let arguments = (ins TFL_FpTensor:$x);
2908
2909  let results = (outs TFL_FpTensor:$y);
2910
2911  let hasOptions = 0b1;
2912
2913  let hasFolder = 1;
2914}
2915
2916def TFL_SubOp : TFL_Op<"sub", [
2917    ResultsBroadcastableShape,
2918    BinaryOpSameElementTypeConstraint,
2919    TFL_RuntimePredOpTrait<"Operands do not have valid shapes",
2920      CPred<"TFL::VerifySubOpShapeConstraints(llvm::cast<SubOp>($_op))">>,
2921    NoSideEffect]> {
2922  let summary = "Subtraction operator";
2923
2924  let description = [{
2925    Element-wise subtraction operation.
2926  }];
2927
2928  let arguments = (
2929    ins TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16]>:$lhs,
2930    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16]>:$rhs,
2931    TFL_AFAttr:$fused_activation_function);
2932
2933  let results = (outs TFL_TensorOf<[F32, I32, I64, QI8, QUI8, QI16]>:$output);
2934
2935  let hasFolder = 1;
2936
2937  let builders = [TFL_FusedBroadcastableBinaryBuilder];
2938
2939  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];
2940
2941  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
2942
2943  let hasOptions = 1;
2944}
2945
2946def TFL_SquaredDifferenceOp : TFL_Op<"squared_difference", [
2947    TFL_OperandsHaveSameShapesOrBroadcastableShape<[0, 1], 4>,
2948    BinaryOpSameElementTypeConstraint,
2949    TFL_SameFirstOperandAndFirstResultElementType,
2950    ResultsBroadcastableShape,
2951    NoSideEffect,
2952    NoQuantizableResult]> {
2953  let summary = "Squared difference operator";
2954
2955  let description = [{
2956    Element-wise squared difference operation.
2957  }];
2958
2959  let arguments = (
2960    ins TFL_TensorOf<[F32, I32, QI8]>:$lhs,
2961    TFL_TensorOf<[F32, I32, QI8]>:$rhs);
2962
2963  let results = (outs TFL_TensorOf<[F32, I32, QI8]>:$output);
2964
2965  let builders = [TFL_BroadcastableBinaryBuilder];
2966
2967  let parser = [{ return mlir::impl::parseOneResultSameOperandTypeOp(parser, result); }];
2968
2969  let printer = [{ return mlir::impl::printOneResultOp(getOperation(), p); }];
2970}
2971
2972def TFL_TanhOp: TFL_Op<"tanh", [
2973    NoSideEffect,
2974    SameOperandsAndResultShape,
2975    PredOpTrait<"input and output must have same element type",
2976      TFL_TCresVTEtIsSameAsOp<0, 0>>,
2977    FixedOutputRangeInterface]> {
2978  let summary = "Hyperbolic tangent operator";
2979
2980  let description = [{
2981    Computes element-wise Hyperbolic tangent of input
2982  }];
2983
2984  let arguments = (ins TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$input);
2985
2986  let results = (outs TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$output);
2987
2988  // This builder doesn't work with quantized type, so it can only be used by
2989  // non-quantization tablegen patterns. Currently, it is used by the
2990  // elementwise-move reordering pattern in the optimize_patterns.td
2991  let builders = [
2992    OpBuilderDAG<(ins "Value":$input),
2993    [{
2994      $_state.addOperands({input});
2995      $_state.addTypes(input.getType());
2996    }]>
2997  ];
2998
2999  let extraClassDeclaration = [{
3000  // FixedOutputRangeInterface:
3001  quant::UniformQuantizedType GetFixedOutputRange(
3002      bool is_signed, int bit_width) {
3003    auto result_type = output().getType();
3004    // central_value = min_value / 2 + (max_value - 1) / 2 + 1
3005    // zero_point = central_value
3006    // scale = 1. / (central_value - min_value)
3007    return quant::GetFixedOutputRange(is_signed, bit_width, result_type,
3008        /*scale=*/1.0 / 128, /*zero_point=*/0);
3009  }
3010  }];
3011}
3012
3013def TFL_TileOp: TFL_Op<"tile", [
3014    NoSideEffect,
3015    SameOperandsAndResultsScale,
3016    PredOpTrait<"input and output must have same element type",
3017      TFL_TCresVTEtIsSameAsOp<0, 0>>]> {
3018  let summary = "Tile operator.";
3019  let description = [{
3020    Constructs a tensor by tiling a given tensor.
3021
3022   This operation creates a new tensor by replicating `input`
3023   `multiples` times. The output tensor's i'th dimension has
3024   `input.dims(i) * multiples[i]` elements, and the values of `input`
3025   are replicated `multiples[i]` times along the i'th dimension.
3026   For example, tiling [a b c d] by [2] produces [a b c d a b c d].
3027  }];
3028
3029  let arguments = (ins
3030    TFL_TensorOf<[F32, I1, I32, I64, UI8, QUI8, TFL_Str]>:$input,
3031    TFL_I32OrI64Tensor:$multiples);
3032
3033  let results = (outs
3034    TFL_TensorOf<[F32, I1, I32, I64, UI8, QUI8, TFL_Str]>:$output);
3035
3036  let hasOptions = 0;
3037}
3038
3039// TODO(jpienaar): Maybe make it accept any single element tensor as `k`.
3040// TODO(jpienaar): Check that input has one or more dimensions.
3041// TODO(jpienaar): Check that k is less or equal the internal dimension
3042def TFL_TopKV2Op: TFL_Op<"topk_v2", [
3043    NoSideEffect,
3044    TFL_OperandHasRankAtLeast<0, 1>,
3045    TFL_OperandHasRank<1, 0>,
3046    PredOpTrait<"result and input element type match",
3047      TFL_TCresVTEtIsSameAsOp<0,0>>,
3048    SameOperandsAndResultsScale]> {
3049  let summary = "TopK operator";
3050
3051  let description = [{
3052    Returns the top `k` largest elements along each last-dimensional slice of
3053    `input`, and the indices of those values within the last dimension of the
3054    input tensor.
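
    For example (illustrative values):

    ```
    # 'input' is [10, 30, 20], 'k' is 2
    topk_v2(input, k) ==> values [30, 20], indices [1, 2]
    ```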
3055  }];
3056
3057  let arguments = (ins
3058    TFL_TensorOf<[F32, I8, I32, I64, UI8, QI8, QUI8]>:$input,
3059    TFL_I32Tensor:$k);
3060
3061  let results = (outs
3062    TFL_TensorOf<[F32, I8, I32, I64, UI8, QI8, QUI8]>:$values,
3063    TFL_I32Tensor:$indices);
3064
3065  let builders = [
3066    OpBuilderDAG<(ins "Value":$input, "Value":$k),
3067    [{ BuildTopKOp(&$_builder, $_state, input, k); }]>];
3068
3069  let hasOptions = 1;
3070}
3071
3072def TFL_TransposeOp : TFL_Op<"transpose", [
3073    NoSideEffect,
3074    TFL_OperandHasRankAtMost<0, 5>,
3075    TFL_OperandHasRank<1, 1>,
3076    PredOpTrait<"input and output must have same element type",
3077      TFL_TCresVTEtIsSameAsOp<0, 0>>,
3078    SameOperandsAndResultsScale]> {
3079  let summary = "Transpose operator";
3080
3081  let description = [{
3082    Returns the transpose of `x`, permuting the dimensions according to `perm`.
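
    For example (illustrative values):

    ```
    # 'input' is [[1, 2, 3], [4, 5, 6]]
    transpose(input, perm=[1, 0]) ==> [[1, 4], [2, 5], [3, 6]]
    ```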
3083  }];
3084
3085  let arguments = (ins
3086    TFL_TensorOf<[I32, F32, I8, UI8, QI8, QUI8, TFL_Quint8, I1, I64, QI16]>:$input,
3087    TFL_TensorOf<[I32]>:$perm
3088  );
3089
3090  let results = (outs
3091    TFL_TensorOf<[I32, F32, I8, UI8, QI8, QUI8, TFL_Quint8, I1, I64, QI16]>:$output
3092  );
3093
3094  let verifier = [{ return Verify(*this); }];
3095
3096  let hasFolder = 1;
3097}
3098
3099def TFL_UnpackOp : TFL_Op<"unpack", [
3100    NoSideEffect,
3101    SameOperandsAndResultElementType,
3102    SameOperandsAndResultsScale,
3103    DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
3104  let summary = "Unpacks a tensor along a dimension into multiple tensors";
3105
3106  let description = [{
3107    Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
3108
3109    Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
3110    For example, given a tensor of shape `(A, B, C, D)`;
3111
3112    If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
3113      and each tensor in `output` will have shape `(B, C, D)`. (Note that the
3114      dimension unpacked along is gone, unlike `split`).
3115
3116    If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
3117      and each tensor in `output` will have shape `(A, C, D)`.
3118    Etc.
3119
3120    This is the opposite of `pack`.
3121  }];
3122
3123  let arguments = (ins
3124    TFL_TensorOf<[F32, I1, I8, UI8, I32, QI8, QUI8, I16, QI16]>:$input,
3125
3126    Confined<I32Attr, [IntNonNegative]>:$num,
3127    I32Attr:$axis
3128  );
3129
3130  let results = (outs
3131    TFL_VariadicTensorOf<[F32, I1, I8, UI8, I32, QI8, QUI8, I16, QI16]>:$outputs
3132  );
3133
3134  let extraClassDeclaration = [{
3135    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r);
3136  }];
3137
3138  let hasOptions = 1;
3139}
3140
3141def TFL_ZerosLikeOp: TFL_Op<"zeros_like", [
3142    PredOpTrait<"input and output must have same element type",
3143      TFL_TCresVTEtIsSameAsOp<0, 0>>,
3144    SameOperandsAndResultType,
3145    SameOperandsAndResultShape,
3146    NoSideEffect,
3147    NoQuantizableResult]> {
3148  let summary = "ZerosLike operator";
3149
3150  let description = [{
3151    Returns a tensor of zeros with the same shape and type as the input tensor.
3152  }];
3153
3154  let arguments = (ins TFL_TensorOf<[I64, I32, F32]>:$input);
3155
3156  let results = (outs TFL_TensorOf<[I64, I32, F32]>:$output);
3157
3158  let hasOptions = 1;
3159}
3160
3161def TFL_BatchToSpaceNdOp: TFL_Op<"batch_to_space_nd", [
3162    NoSideEffect,
3163    SameOperandsAndResultsScale,
3164    PredOpTrait<"input and output must have same element type",
3165      TFL_TCresVTEtIsSameAsOp<0, 0>>,
3166    TFL_OperandHasRankRange<0, 3, 4>,
3167    TFL_OperandHasRank<1, 1>,
3168    TFL_OperandHasRank<2, 2>
3169  ]> {
3170  let summary = "BatchToSpaceNd operator";
3171
3172  let description = [{
3173    This operation reshapes the "batch" dimension 0 into space dimensions.
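
    For example (an illustrative sketch; the third operand, named `indices`
    here, is assumed to play the role of the crops argument of the
    corresponding TF op):

    ```
    # 'input' has shape [4, 1, 1, 1], 'block_shape' is [2, 2], crops are zero
    batch_to_space_nd(input, block_shape, crops) ==> output of shape [1, 2, 2, 1]
    ```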
3174  }];
3175
3176  let arguments = (ins
3177    TFL_TensorOf<[F32, I8, I32, I64, UI8, QI8, QUI8]>:$input,
3178    TFL_TensorOf<[I32]>:$block_shape,
3179    TFL_TensorOf<[I32]>:$indices
3180  );
3181
3182  let results = (outs
3183    TFL_TensorOf<[F32, I16, I32, I64, UI8, QI8, QUI8]>:$output
3184  );
3185}
3186
3187def TFL_SpaceToBatchNdOp: TFL_Op<"space_to_batch_nd", [
3188    NoSideEffect,
3189    SameOperandsAndResultsScale,
3190    TFL_OperandHasRankRange<0, 3, 4>,
3191    PredOpTrait<"input and output must have same element type",
3192      TFL_TCresVTEtIsSameAsOp<0, 0>>
3193  ]> {
3194  let summary = "SpaceToBatchNd operator";
3195
3196  let description = [{
3197    This operation reshapes space dimensions into the "batch" dimension 0.
3198  }];
3199
3200  let arguments = (ins
3201    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$input,
3202    TFL_I32Tensor:$block_shape,
3203    TFL_I32Tensor:$paddings
3204  );
3205
3206  let results = (outs
3207    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$output
3208  );
3209}
3210
3211def TFL_SpaceToDepthOp: TFL_Op<"space_to_depth", [
3212    NoSideEffect,
3213    SameOperandsAndResultsScale,
3214    PredOpTrait<"input and output must have same element type",
3215      TFL_TCresVTEtIsSameAsOp<0, 0>>,
3216    TFL_OperandHasRankAtMost<0, 4>
3217  ]> {
3218  let summary = "SpaceToDepth operator";
3219
3220  let description = [{
3221    Rearranges blocks of spatial data into depth. More specifically,
3222    this op outputs a copy of the input tensor where values from the `height`
3223    and `width` dimensions are moved to the `depth` dimension.
3224    `block_size` indicates the input block size.
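
    For example (illustrative values):

    ```
    # 'input' has shape [1, 2, 2, 1] with values [[[[1], [2]], [[3], [4]]]]
    space_to_depth(input, block_size=2) ==> [[[[1, 2, 3, 4]]]]  # shape [1, 1, 1, 4]
    ```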
3225   }];
3226
3227  let arguments = (ins
3228    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$input,
3229    Confined<I32Attr, [IntPositive]>:$block_size
3230  );
3231
3232  let results = (outs
3233    TFL_TensorOf<[F32, I32, I64, QI8, QUI8, TFL_Quint8]>:$output
3234  );
3235
3236  let hasOptions = 1;
3237}
3238
3239def TFL_DepthToSpaceOp: TFL_Op<"depth_to_space", [
3240    NoSideEffect,
3241    SameOperandsAndResultsScale,
3242    PredOpTrait<"input and output must have same element type",
3243      TFL_TCresVTEtIsSameAsOp<0, 0>>,
3244    TFL_OperandHasRankAtMost<0, 4>
3245  ]> {
3246  let summary = "DepthToSpace operator";
3247
3248  let description = [{
3249    Rearranges data from depth into blocks of spatial data.
3250    This is the reverse transformation of SpaceToDepth. More specifically,
3251    this op outputs a copy of the input tensor where values from the `depth`
3252    dimension are moved in spatial blocks to the `height` and `width`
3253    dimensions. The attr `block_size` indicates the input block size and how
3254    the data is moved.
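
    For example (an illustrative MLIR snippet; shapes are hypothetical), a
    `block_size` of 2 expands the depth dimension into 2x2 spatial blocks:

    ```
    %output = "tfl.depth_to_space"(%input) {block_size = 2 : i32}
        : (tensor<1x1x1x4xf32>) -> tensor<1x2x2x1xf32>
    ```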
  }];
3256
3257  let arguments = (ins
3258    TFL_TensorOf<[F32, I8, I32, I64, TFL_Quint8, UI8, QI8, QUI8]>:$input,
3259    Confined<I32Attr, [IntPositive]>:$block_size
3260  );
3261
3262  let results = (outs
3263    TFL_TensorOf<[F32, I8, I32, I64, TFL_Quint8, UI8, QI8, QUI8]>:$output
3264  );
3265
3266  let hasOptions = 1;
3267}
3268
3269def TFL_SplitOp : TFL_Op<"split", [
3270    NoSideEffect,
3271    TFL_Operand0DOr1ElementTensor<0>,
3272    SameOperandsAndResultsScale]> {
3273  let summary = "Splits a tensor into `num_split` tensors along one dimension.";
3274
3275  let description = [{
    Splits the `value` tensor along `split_dim` into a number of sub-tensors
    with the same shape as the original one, except for `split_dim`. Same as
    tf.Split.
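
    For example (an illustrative MLIR snippet; shapes are hypothetical),
    splitting a 4x8 tensor into two equal pieces along dimension 0:

    ```
    %0:2 = "tfl.split"(%split_dim, %value) {num_splits = 2 : i32}
        : (tensor<i32>, tensor<4x8xf32>) -> (tensor<2x8xf32>, tensor<2x8xf32>)
    ```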
3279  }];
3280
3281  let arguments = (ins
3282    TFL_TensorOf<[I32]>:$split_dim,
3283    TFL_TensorOf<[F32, I16, I32, I64, I8, UI8, QI8, QUI8, QI16]>:$value,
3284    Confined<I32Attr, [IntPositive]>:$num_splits
3285  );
3286
3287  let results = (outs
3288    TFL_VariadicTensorOf<[F32, I16, I32, I64, I8, UI8, QI8, QUI8, QI16]>:$outputs
3289  );
3290
3291  let verifier = [{ return Verify(*this); }];
3292
3293  let hasOptions = 1;
3294}
3295
3296def TFL_SplitVOp : TFL_Op<"split_v", [NoSideEffect, SameOperandsAndResultsScale]> {
3297  let summary = "Splits a tensor into `num_split` tensors along one dimension.";
3298
3299  let description = [{
    Splits the `value` tensor along `split_dim` into a number of sub-tensors
    with the same shape as the original one, except for `split_dim`. The grouping
    of the resultant sub-tensors is decided by `size_splits`. Same as tf.SplitV.
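
    For example (an illustrative MLIR snippet), assuming `size_splits` holds
    the values [1, 3] and `split_dim` holds 0:

    ```
    %0:2 = "tfl.split_v"(%value, %size_splits, %split_dim) {num_splits = 2 : i32}
        : (tensor<4x8xf32>, tensor<2xi32>, tensor<i32>) -> (tensor<1x8xf32>, tensor<3x8xf32>)
    ```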
3303  }];
3304
3305  let arguments = (ins
3306    TFL_TensorOf<[F32, I16, I32, I64, I8, UI8, QI8, QUI8, QI16]>:$value,
3307    TFL_1DTensorOf<[I32], [I32]>:$size_splits,
3308    TFL_0DTensorOf<[I32], [I32]>:$split_dim,
3309    Confined<I32Attr, [IntPositive]>:$num_splits
3310  );
3311
3312  let results = (outs
3313    TFL_VariadicTensorOf<[F32, I16, I32, I64, I8, UI8, QI8, QUI8, QI16]>:$outputs
3314  );
3315
3316  let verifier = [{ return Verify(*this); }];
3317
3318  let hasOptions = 1;
3319}
3320
3321def TFL_ResizeBilinearOp: TFL_Op<"resize_bilinear", [
3322    NoSideEffect,
3323    PredOpTrait<"input and output must have same element type",
3324      TFL_TCresVTEtIsSameAsOp<0, 0>>,
3325    TFL_OperandHasRank<0, 4>,
3326    TFL_OperandHasRank<1, 1>,
3327    SameOperandsAndResultsScale]> {
3328  let summary = "ResizeBilinear Op";
3329
3330  let description = [{
3331    Resize `images` to `size` using bilinear interpolation.
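
    For example (an illustrative MLIR snippet), assuming `size` holds the
    values [16, 16]:

    ```
    %output = "tfl.resize_bilinear"(%input, %size)
        {align_corners = false, half_pixel_centers = true}
        : (tensor<1x8x8x3xf32>, tensor<2xi32>) -> tensor<1x16x16x3xf32>
    ```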
3332  }];
3333
3334  let arguments = (ins
3335    TFL_TensorOf<[F32, TFL_Quint8, QUI8, QI8, QI16]>:$input,
3336    TFL_I32Tensor:$size,
3337    BoolAttr:$align_corners,
3338    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
3339  );
3340
3341  let results = (outs
3342    TFL_TensorOf<[F32, TFL_Quint8, QUI8, QI8, QI16]>:$output
3343  );
3344
3345  let hasOptions = 1;
3346}
3347
3348def TFL_ResizeNearestNeighborOp : TFL_Op<"resize_nearest_neighbor", [
3349    NoSideEffect,
3350    PredOpTrait<"input and output must have same element type",
3351      TFL_TCresVTEtIsSameAsOp<0, 0>>,
3352    TFL_OperandHasRank<0, 4>,
3353    TFL_OperandHasRank<1, 1>,
3354    SameOperandsAndResultsScale]> {
3355  let summary = "ResizeNearestNeighbor Op";
3356
3357  let description = [{
3358    Resize `images` to `size` using nearest neighbor interpolation.
3359  }];
3360
3361  let arguments = (ins
3362    TFL_TensorOf<[F32, TFL_Quint8, QUI8, QI8, QI16]>:$input,
3363    TFL_I32Tensor:$size,
3364    BoolAttr:$align_corners,
3365    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
3366  );
3367
3368  let results = (outs
3369    TFL_TensorOf<[F32, TFL_Quint8, QUI8, QI8, QI16]>:$output
3370  );
3371
3372  let hasOptions = 1;
3373}
3374
3375def TFL_SparseToDenseOp : TFL_Op<"sparse_to_dense", [
3376    NoSideEffect,
3377    PredOpTrait<"sparse_values and dense must have same element type",
3378      TFL_TCresVTEtIsSameAsOp<0, 2>>,
3379    PredOpTrait<"default_value and dense must have same element type",
3380      TFL_TCresVTEtIsSameAsOp<0, 3>>,
3381    TFL_OperandHasRankAtMost<0, 2>,
3382    TFL_OperandHasRankAtMost<1, 1>,
3383    TFL_OperandHasRankAtMost<2, 1>]> {
3384  let summary = "Converts a sparse representation into a dense tensor.";
3385
3386  let description = [{
3387Builds an array `dense` with shape `output_shape` such that
3388
3389```
3390# If sparse_indices is scalar
3391dense[i] = (i == sparse_indices ? sparse_values : default_value)
3392
3393# If sparse_indices is a vector, then for each i
3394dense[sparse_indices[i]] = sparse_values[i]
3395
3396# If sparse_indices is an n by d matrix, then for each i in [0, n)
3397dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
3398```
3399
3400All other values in `dense` are set to `default_value`.  If `sparse_values` is a
3401scalar, all sparse indices are set to this single value.
3402
3403Indices should be sorted in lexicographic order, and indices must not
3404contain any repeats. If `validate_indices` is true, these properties
3405are checked during execution.
3406  }];
3407
3408  let arguments = (ins
3409    TFL_I32OrI64Tensor:$sparse_indices,
3410    TFL_I32OrI64Tensor:$output_shape,
3411    TFL_TensorOf<[I32, I64, I8, QI8, UI8, QUI8, TFL_Quint8, F32]>:$sparse_values,
3412    TFL_TensorOf<[I32, I64, I8, QI8, UI8, QUI8, TFL_Quint8, F32]>:$default_value
3413  );
3414
3415  let results = (outs
3416    TFL_TensorOf<[I32, I64, I8, QI8, UI8, QUI8, TFL_Quint8, F32]>:$dense
3417  );
3418}
3419
3420def TFL_StridedSliceOp: TFL_Op<"strided_slice", [
3421    NoSideEffect,
3422    PredOpTrait<"input and output must have same element type",
3423      TFL_TCresVTEtIsSameAsOp<0, 0>>,
3424    SameOperandsAndResultsScale,
3425    TFL_OperandHasRankAtMost<0, 5>,
3426    TFL_OperandHasRank<1, 1>,
3427    TFL_OperandHasRank<2, 1>,
3428    TFL_OperandHasRank<3, 1>
3429  ]> {
3430  let summary = "StridedSlice Op";
3431
3432  let description = [{
3433    Return a strided slice from `input`.
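
    For example (an illustrative MLIR snippet), assuming `begin` holds [0, 0],
    `end` holds [2, 4], and `strides` holds [1, 2]:

    ```
    %output = "tfl.strided_slice"(%input, %begin, %end, %strides)
        {begin_mask = 0 : i32, end_mask = 0 : i32, ellipsis_mask = 0 : i32,
         new_axis_mask = 0 : i32, shrink_axis_mask = 0 : i32}
        : (tensor<4x8xf32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>)
        -> tensor<2x2xf32>
    ```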
3434  }];
3435
3436  let arguments = (ins
3437    TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8, I1, I16, QI16, TFL_Quint8, TFL_Str]>:$input,
3438    TFL_I32Tensor:$begin,
3439    TFL_I32Tensor:$end,
3440    TFL_I32Tensor:$strides,
3441
3442    I32Attr:$begin_mask,
3443    I32Attr:$end_mask,
3444    Confined<I32Attr, [TFL_IntEqualsTo<0>]>:$ellipsis_mask,
3445    Confined<I32Attr, [TFL_IntEqualsTo<0>]>:$new_axis_mask,
3446    I32Attr:$shrink_axis_mask
3447  );
3448
3449  let results = (outs
3450    TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8, I1, I16, QI16, TFL_Quint8, TFL_Str]>:$output
3451  );
3452
3453  let hasOptions = 1;
3454}
3455
3456// If there is a change in supporting more types in the TFLite cast op kernel,
3457// the While loop outline pass should be updated since it inserts cast op(s)
3458// after the TF -> TFL legalization pass is done.
3459// LINT.IfChange
3460def TFL_CastOp : TFL_Op<"cast", [
3461    NoSideEffect,
3462    SameOperandsAndResultShape,
3463    NoQuantizableResult]> {
3464  let summary = "Cast operator";
3465
3466  let description = [{
3467    Casts input from input type to output type.
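
    For example (an illustrative MLIR snippet; the shape is hypothetical):

    ```
    %output = "tfl.cast"(%input) : (tensor<4xf32>) -> tensor<4xi32>
    ```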
3468  }];
3469
3470  let arguments = (ins
3471    TFL_TensorOf<[F32, I1, I16, I32, I64, TFL_Quint8, UI8, Complex<F<32>>]>:$input
3472  );
3473
3474  let results = (outs TFL_TensorOf<[F32, I1, I16, I32, I64, TFL_Quint8, UI8, Complex<F<32>>]>:$output);
3475
  // TFLite's cast op does not utilize CastOptions; instead, it derives the
  // types from the TfLiteTensors.
3478  let hasOptions = 0;
3479
3480  let hasFolder = 1;
3481}
3482// LINT.ThenChange(//tensorflow/compiler/mlir/lite/transforms/while_loop_outline.cc)
3483
3484def TFL_MirrorPadOp: TFL_Op<"mirror_pad", [
3485                     NoSideEffect, TFL_OperandHasRank<1, 2>]> {
3486  let summary = "MirrorPad Operator. Pads a tensor with mirrored values.";
3487
3488  let description = [{
    This operation pads an input with mirrored values according to the paddings
3490    you specify. paddings is an integer tensor with shape [n, 2],
3491    where n is the rank of input.
3492    For each dimension D of input, paddings[D, 0] indicates how many values
3493    to add before the contents of input in that dimension,
3494    and paddings[D, 1] indicates how many values to add after the contents of
3495    input in that dimension.
3496
    Both paddings[D, 0] and paddings[D, 1] must be no greater than
    input.dim_size(D) - 1 if `mode` is REFLECT, or no greater than
    input.dim_size(D) if `mode` is SYMMETRIC.
3500
3501    The padded size of each dimension D of the output is:
3502
3503    paddings(D, 0) + input.dim_size(D) + paddings(D, 1)
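
    For example, mirror-padding a hypothetical 1-D tensor [1, 2, 3] with
    paddings [[1, 1]]:

    ```
    REFLECT mode:   [2, 1, 2, 3, 2]
    SYMMETRIC mode: [1, 1, 2, 3, 3]
    ```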
3504  }];
3505
3506  let arguments = (ins
3507    TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8]>:$input,
3508    TFL_TensorOf<[I32, I64]>:$pad,
3509    TFL_MirrorPaddingAttr:$mode
3510  );
3511
3512  let results = (outs
3513    TFL_TensorOf<[F32, I32, I64, I8, UI8, QI8, QUI8]>:$output
3514  );
3515
3516  let hasOptions = 1;
3517}
3518
3519def TFL_UniqueOp: TFL_Op<"unique", [
3520    TFL_OperandHasRank<0, 1>,
3521    NoSideEffect]> {
3522  let summary = "Unique Op.";
3523
3524  let description = [{
This operation returns a tensor `output` containing all of the unique elements
of `input` sorted in the same order that they occur in `input`. This operation
also returns a tensor `idx` the same size as `input` that contains the index of
each value of `input` in the unique output `output`. In other words:
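
```
# A hypothetical 'input' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
output, idx = unique(input)
output ==> [1, 2, 4, 7, 8]
idx    ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
```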
3529  }];
3530
3531  let arguments = (ins
3532    TFL_TensorOf<[I8, QI8, UI8, QUI8, I16, QI16, I32, I64, F32]>:$input
3533  );
3534
3535  let results = (outs
3536    TFL_TensorOf<[I8, QI8, UI8, QUI8, I16, QI16, I32, I64, F32]>:$output,
3537    TFL_I32OrI64Tensor:$idx
3538  );
3539
3540  DerivedTFLiteTypeAttr idx_out_type = DerivedTFLiteTypeAttr<[{
3541    return getResult(1).getType().cast<TensorType>().getElementType().
3542        cast<IntegerType>().getWidth() > 32 ? tflite::TensorType_INT64 :
3543            tflite::TensorType_INT32;
3544    }], [{
3545      TypeAttr::get(getResult(1).getType().cast<TensorType>().getElementType())
3546    }]>;
3547
3548  let hasOptions = 1;
3549}
3550
3551//===----------------------------------------------------------------------===//
3552// Quantization ops.
3553//===----------------------------------------------------------------------===//
3554def TFL_DequantizeOp: TFL_Op<"dequantize", [NoQuantizableResult]> {
3555  let summary = "Dequantize operator";
3556
3557  let description = [{
    Converts a quantized array of integers to floating-point values according
    to the quantization parameters.
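
    For a uniform quantized type, the dequantized value is
    real_value = scale * (quantized_value - zero_point). For example (an
    illustrative MLIR snippet, with a hypothetical scale of 0.1 and zero
    point of 128):

    ```
    %output = "tfl.dequantize"(%input)
        : (tensor<4x!quant.uniform<u8:f32, 0.1:128>>) -> tensor<4xf32>
    ```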
3560  }];
3561
3562  let arguments = (ins TFL_TensorOf<[QI8, QUI8, QI16, F16]>:$input);
3563
3564  let results = (outs TFL_FpTensor:$output);
3565}
3566
3567def TFL_FakeQuantOp : TFL_Op<"fake_quant", [NoSideEffect]> {
3568  let summary = "FakeQuant operator";
3569
3570  let description = [{
    Fake-quantize the 'inputs' tensor of type float via the float scalars min
    and max, producing an 'outputs' tensor of the same shape as the inputs.
3573  }];
3574
3575  let arguments = (
3576    ins TFL_FpTensor:$input,
3577    // The expected [min, max] range of values.
3578    F32Attr:$min,
3579    F32Attr:$max,
3580
3581    // The bitwidth of the quantization; between 2 and 16, inclusive.
3582    Confined<I32Attr, [IntMinValue<2>, IntMaxValue<16>]>:$num_bits,
3583    // Quantization range starts from 0 or 1; starts from 1 if true.
3584    Confined<BoolAttr, [TFL_BoolFalse]>:$narrow_range);
3585
3586  let results = (outs TFL_FpTensor:$output);
3587
  let hasCanonicalizer = 1;
3589
3590  let hasOptions = 1;
3591}
3592
3593def TFL_QConstOp : Op<TFL_Dialect, "pseudo_qconst", [
3594    NoSideEffect, FirstAttrDerivedResultType, NoQuantizableResult]> {
3595  let summary = "Quantized constant pseudo op";
3596
3597  let description = [{
3598    Represents a quantized constant value in TensorFlow Lite dialect. This is
    not an actual operation and it will be lowered to a buffer instead. The
3600    quantization parameters are stored as a type attribute in this constant.
3601  }];
3602
3603  let arguments = (
3604    ins TensorTypeAttr:$qtype,
3605    ElementsAttr:$value
3606  );
3607
3608  let results = (outs TFL_TensorOf<[QUI8, QI8, QI16, QUI16, TFL_Quint8]>:$output);
3609
3610  let builders = [
3611    OpBuilderDAG<(ins "TypeAttr":$qtype, "Attribute":$value),
3612    [{
3613      $_state.addAttribute("qtype", qtype);
3614      $_state.addAttribute("value", value);
3615      $_state.addTypes(qtype.getValue());
3616    }]>
3617  ];
3618}
3619
3620def TFL_SparseQConstOp : Op<TFL_Dialect, "pseudo_sparse_qconst", [
3621    NoSideEffect, FirstAttrDerivedResultType, NoQuantizableResult]> {
3622  let summary = "Sparse quantized constant pseudo op";
3623
3624  let description = [{
3625    Represents a sparse quantized constant value in TensorFlow Lite dialect.
    This is not an actual operation and it will be lowered to a buffer instead.
3627    The quantization parameters are stored as a type attribute in this constant.
3628  }];
3629
3630  let arguments = (
3631    ins TensorTypeAttr:$qtype,
3632    ElementsAttr:$value,
3633    SparsityParameterAttr:$s_param,
3634    ElementsAttr:$compressed_data
3635  );
3636
3637  let results = (outs TFL_TensorOf<[QUI8, QI8, QI16, QUI16, TFL_Quint8]>:$output);
3638
3639  let builders = [
3640    OpBuilderDAG<(ins "TypeAttr":$qtype, "Attribute":$value,
3641      "SparsityParameterAttr":$s_param, "Attribute":$compressed_data),
3642    [{
3643      $_state.addTypes(qtype.getValue());
3644      $_state.addAttribute("qtype", qtype);
3645      $_state.addAttribute("value", value);
3646      $_state.addAttribute("s_param", s_param);
3647      $_state.addAttribute("compressed_data", compressed_data);
3648    }]>
3649  ];
3650}
3651
3652def TFL_QuantizeOp: TFL_Op<"quantize", [
3653    FirstAttrDerivedResultType,
3654    SameOperandsAndResultShape,
3655    NoQuantizableResult]> {
3656  let summary = "Quantize operator";
3657
3658  let description = [{
3659    Converts floating point tensors to quantized integer tensors according to
3660    the quantization parameters defined in the type attribute.
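
    For example (an illustrative MLIR snippet; the quantization parameters
    are hypothetical):

    ```
    %output = "tfl.quantize"(%input)
        {qtype = tensor<4x!quant.uniform<u8:f32, 0.1:128>>}
        : (tensor<4xf32>) -> tensor<4x!quant.uniform<u8:f32, 0.1:128>>
    ```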
3661  }];
3662
3663  let arguments = (
3664    ins TFL_TensorOf<[F32, QI8, QUI8, QI16, TFL_Quint8]>:$input,
3665    TensorTypeAttr:$qtype
3666  );
3667
3668  let results = (outs TFL_TensorOf<[QI8, QUI8, QI16, TFL_Quint8]>:$output);
3669}
3670
3671def TFL_DensifyOp: TFL_Op<"densify", [
3672    NoSideEffect,
3673    PredOpTrait<"input and output must have same element type",
3674      TFL_TCresVTEtIsSameAsOp<0, 0>>,
3675    NoQuantizableResult]> {
3676  let summary = "Densify operator";
3677
3678  let description = [{
    Converts a sparse tensor to dense format.
3680  }];
3681
3682  let arguments = (ins TFL_TensorOf<[F32, I8]>:$input);
3683
3684  let results = (outs TFL_TensorOf<[F32, I8]>:$output);
3685}
3686
3687//===----------------------------------------------------------------------===//
3688// LSTM Ops
3689//===----------------------------------------------------------------------===//
3690
3691// LSTM Kernel Type attributes
3692def TFL_LSTM_KT_FULL  : StrEnumAttrCase<"FULL">;
3693def TFL_LSTM_KT_BASIC  : StrEnumAttrCase<"BASIC">;
3694
3695def TFL_LSTMKernelTypeAttr : StrEnumAttr<"LSTMKernelType", "lstm kernel type enum",
3696   [
3697     TFL_LSTM_KT_FULL,  TFL_LSTM_KT_BASIC
3698   ]>;
3699
3700def LstmMandatoryInputsConstraint : PredOpTrait<
3701  "mandatory operands element types should match",
3702  // TODO(ashwinm): Replace the indices with input tensor names when that
3703  // support is available.
3704  Or<[
3705    TCopVTEtAreSameAt<[0, 2, 3, 4, 6, 7, 8, 13, 14, 15, 18, 19]>,
3706    Neg<TypeIsPred<"input", F32>>]>>;
3707
3708def LstmOptionalPeepholeWeightConstraint : PredOpTrait<
3709  "the optional peephole weights should all be specified or none",
3710  // Ignore input 9 (cell_to_input_weights) for LSTM with CIFG.
3711  And<[
3712    TFL_TCopVTEtAreSameAt<10, 11, 16>,
3713    Or<[TFL_TCopVTEtAreSameAt<9, 10, 16>,
3714        And<[TypeIsPred<"input_to_input_weights", NoneType>,
3715             TypeIsPred<"cell_to_input_weights", NoneType>]>]>]>>;
3716
3717def LstmProjectionWeightBiasConstraint : PredOpTrait<
3718  "either projection weight must be specified or both projection weight and "
3719  "projection bias must not be specified",
3720   Or<[
3721      And<[TypeIsPred<"projection_weights", NoneType>,
3722           TypeIsPred<"projection_bias", NoneType>]>,
3723      Neg<TypeIsPred<"projection_weights", NoneType>>]>>;
3724
3725def LstmCifgInputConstraint : PredOpTrait<
3726  "the cifg inputs should all be specified or none",
3727   // If LSTM has combined input/forget gate, input 1, 5, 9, 12, 20 are all none
3728   // or 1, 5, 12 should not be none. Inputs 9 and 20 depend on LSTM's variants.
3729   Or<[
3730     And<[TypeIsPred<"input_to_input_weights", NoneType>,
3731          TypeIsPred<"recurrent_to_input_weights", NoneType>,
3732          TypeIsPred<"cell_to_input_weights", NoneType>,
3733          TypeIsPred<"input_gate_bias", NoneType>,
3734          TypeIsPred<"input_layer_norm_coefficients", NoneType>]>,
3735     Neg<Or<[
3736       TypeIsPred<"input_to_input_weights", NoneType>,
3737       TypeIsPred<"recurrent_to_input_weights", NoneType>,
3738       TypeIsPred<"input_gate_bias", NoneType>]>>]>>;
3739
3740
3741// TODO(b/137798843): Need to add an additional constraint for both LSTM and
3742// UnidirectionalSequenceLstm
3743// For layer norm: if layer norm is false, tensor {20, 21, 22, 23}
3744// are null; if layer norm is true, tensors {21, 22, 23} are not null; tensor
3745// {20} is not null if additionally cifg = false.
3746
3747def LstmResultConstraint : PredOpTrait<
3748  "the input and result tensor elemental types must be same",
3749  TFL_TCresVTEtIsSameAsOp<0, 0>>;
3750
3751// This is the basic kernel type LSTM op.
3752// TODO(b/142417845): Refactor this part to return its tflite node name as
3753// "lstm".
3754def TFL_BasicLSTMOp : TFL_Op<"basic_lstm", [NoSideEffect,
3755    TFL_OperandHasRank<0, 2>, TFL_OperandHasRank<1, 2>, TFL_OperandHasRank<2, 2>,
3756    TFL_OperandHasRank<3, 1>, TFL_OperandHasRank<4, 2>]> {
3757  let summary = "The basic lstm operator";
3758
3759  let description = [{
    Basic LSTM cell operator.
3761  }];
3762
3763  let arguments = (
3764    ins TFL_TensorOf<[F32, QUI8]>:$data_input,
3765    TFL_TensorOf<[F32, QUI8]>:$prev_activ_input,
3766    TFL_TensorOf<[F32, QUI8]>:$weights_input,
3767    TFL_TensorOf<[F32, QI32]>:$biases_input,
3768    TFL_TensorOf<[F32, QI16]>:$prev_state_input,
3769
3770    // Attributes
3771    DefaultValuedAttr<TFL_AFAttr, "TANH">:$fused_activation_function,
3772    Confined<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$cell_clip,
3773    Confined<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$proj_clip,
3774    // Since this op is the BASIC kernel only, constrain it.
3775    Confined<
3776      DefaultValuedAttr<TFL_LSTMKernelTypeAttr, "BASIC">,
3777      [TFL_LSTM_KT_BASIC]>:$kernel_type
3778  );
3779
3780  let hasOptions = 1;
3781
3782  let results = (outs TFL_2DTensorOf<[F32, QUI8]>:$activ_output,
3783                      TFL_2DTensorOf<[F32, QUI16]>:$state_output,
3784                      TFL_2DTensorOf<[F32, QUI8]>:$concat_temp,
3785                      TFL_2DTensorOf<[F32, QUI16]>:$activ_temp);
3786}
3787
3788// This is the FULL kernel type LSTM op.
3789def TFL_LSTMOp :
3790  TFL_Op<"lstm",
3791          [LstmMandatoryInputsConstraint,
3792           LstmOptionalPeepholeWeightConstraint,
3793           LstmProjectionWeightBiasConstraint,
3794           LstmCifgInputConstraint,
3795           LstmResultConstraint,
3796           TFL_OperandHasRank<2, 2>,           // input_to_forget_weights
3797           TFL_OperandHasRank<3, 2>,           // input_to_cell_weights
3798           TFL_OperandIsNoneOrHasRank<5, 2>,   // recurrent_to_input_weights
3799           TFL_OperandHasRank<6, 2>,           // recurrent_to_forget_weights
3800           TFL_OperandHasRank<7, 2>,           // recurrent_to_cell_weights
3801           TFL_OperandIsNoneOrHasRank<9, 1>,   // cell_to_input_weights
3802           TFL_OperandIsNoneOrHasRank<10, 1>,  // cell_to_forget_weights
3803           TFL_OperandIsNoneOrHasRank<11, 1>,  // cell_to_output_weights
3804           TFL_OperandHasRank<13, 1>,          // forget_gate_bias
3805           TFL_OperandHasRank<14, 1>,          // cell_gate_bias
3806           TFL_OperandHasRank<15, 1>,          // output_gate_bias
3807           TFL_OperandIsNoneOrHasRank<16, 2>,  // projection_weights
3808           TFL_OperandIsNoneOrHasRank<17, 1>,  // projection_bias
3809           TFL_StatefulOp]> {
3810  let summary = "The full lstm operator";
3811
3812  let description = [{
3813Long short-term memory unit (LSTM) recurrent network layer.
3814The default non-peephole implementation is based on:
3815http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
3816S. Hochreiter and J. Schmidhuber. 'Long Short-Term Memory'. Neural Computation,
38179(8):1735-1780, 1997.
3818The peephole implementation is based on:
3819https://research.google.com/pubs/archive/43905.pdf
3820Hasim Sak, Andrew Senior, and Francoise Beaufays. 'Long short-term memory
3821recurrent neural network architectures for large scale acoustic modeling.'
3822INTERSPEECH, 2014.
3823The coupling of input and forget gate (CIFG) is based on:
3824http://arxiv.org/pdf/1503.04069.pdf
3825Greff et al. 'LSTM: A Search Space Odyssey'
3826The layer normalization is based on:
3827https://arxiv.org/pdf/1607.06450.pdf
3828Ba et al. 'Layer Normalization'
3829  }];
3830
3831  let arguments = (
3832    ins TFL_TensorOf<[F32, QI8]>:$input,
3833
3834    // Weights
3835    TFL_TensorOfOrNone<[F32, QI8]>:$input_to_input_weights,
3836    TFL_TensorOf<[F32, QI8]>:$input_to_forget_weights,
3837    TFL_TensorOf<[F32, QI8]>:$input_to_cell_weights,
3838    TFL_TensorOf<[F32, QI8]>:$input_to_output_weights,
3839
3840    // Recurrent weights
3841    TFL_TensorOfOrNone<[F32, QI8]>:$recurrent_to_input_weights,
3842    TFL_TensorOf<[F32, QI8]>:$recurrent_to_forget_weights,
3843    TFL_TensorOf<[F32, QI8]>:$recurrent_to_cell_weights,
3844    TFL_TensorOf<[F32, QI8]>:$recurrent_to_output_weights,
3845
3846    // Cell weights
3847    TFL_TensorOfOrNone<[F32, QI8, QI16]>:$cell_to_input_weights,
3848    // Optional input
3849    TFL_TensorOfOrNone<[F32, QI8, QI16]>:$cell_to_forget_weights,
3850    // Optional input
3851    TFL_TensorOfOrNone<[F32, QI8, QI16]>:$cell_to_output_weights,
3852
3853    // Bias
3854    TFL_TensorOfOrNone<[F32, QI32]>:$input_gate_bias,
3855    TFL_TensorOf<[F32, QI32]>:$forget_gate_bias,
3856    TFL_TensorOf<[F32, QI32]>:$cell_bias,
3857    TFL_TensorOf<[F32, QI32]>:$output_gate_bias,
3858
3859    // Projection weight and bias
3860    TFL_TensorOfOrNone<[F32, QI8]>:$projection_weights,
3861    // Optional input
3862    TFL_TensorOfOrNone<[F32, QI32]>:$projection_bias,
3863
3864    // Stateful activation and cell states.
3865    TFL_StatefulTensor:$input_activation_state,
3866    TFL_StatefulTensor:$input_cell_state,
3867
3868    // Layer norm coefficients
3869    TFL_TensorOfOrNone<[F32, QI16]>:$input_layer_norm_coefficients,
3870    TFL_TensorOfOrNone<[F32, QI16]>:$forget_layer_norm_coefficients,
3871    TFL_TensorOfOrNone<[F32, QI16]>:$cell_layer_norm_coefficients,
3872    TFL_TensorOfOrNone<[F32, QI16]>:$output_layer_norm_coefficients,
3873
3874    // Attributes
3875    TFL_AFAttr:$fused_activation_function,
3876    Confined<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$cell_clip,
3877    Confined<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$proj_clip,
3878    // Since this op is the FULL kernel only, constrain it.
3879    Confined<
3880      DefaultValuedAttr<TFL_LSTMKernelTypeAttr, "FULL">,
3881      [TFL_LSTM_KT_FULL]>:$kernel_type,
3882
3883    // Types of the optional intermediate tensors, which exist for fully
3884    // quantized LSTM op and hold the ranges of the intermediate tensors.
    // The types of the intermediate tensors are quant.calibrated when imported
3886    // to only store calibrated min, max values. The proper quantization spec is
3887    // determined while going through quantization passes.
3888    OptionalAttr<TypeAttr>:$input_to_input_intermediate,
3889    OptionalAttr<TypeAttr>:$input_to_forget_intermediate,
3890    OptionalAttr<TypeAttr>:$input_to_cell_intermediate,
3891    OptionalAttr<TypeAttr>:$input_to_output_intermediate,
3892    OptionalAttr<TypeAttr>:$effective_hidden_scale_intermediate
3893  );
3894
3895  let results = (outs AnyTensor:$output);
3896
3897  // TODO(fengliuai): customize printer and parser to not display
3898  // empty region.
3899  let regions = (region AnyRegion:$internal);
3900
3901  let hasOptions = 1;
3902
3903  let hasCanonicalizer = 1;
3904
3905  let verifier = [{ return Verify(*this); }];
3906
3907  let extraClassDeclaration = [{
3908    // StatefulOpInterface:
3909    std::vector<int> GetStatefulOperands() { return {18, 19}; }
3910  }];
3911}
3912
3913// UnidirectionalSequenceLstm op.
3914// TODO(ashwinm): Add constraint to validate the combination of operands
3915// that are valid for hybrid vs fully quantized vs float only semantics
3916def TFL_UnidirectionalSequenceLSTMOp :
3917  TFL_Op<"unidirectional_sequence_lstm",
3918          [LstmMandatoryInputsConstraint,
3919           LstmOptionalPeepholeWeightConstraint,
3920           LstmProjectionWeightBiasConstraint,
3921           LstmCifgInputConstraint,
3922           LstmResultConstraint,
3923           TFL_OperandHasRankAtLeast<0, 2>,    // input
3924           TFL_OperandIsNoneOrHasRank<1, 2>,   // input_to_input_weights
3925           TFL_OperandHasRank<2, 2>,           // input_to_forget_weights
3926           TFL_OperandHasRank<3, 2>,           // input_to_cell_weights
3927           TFL_OperandHasRank<4, 2>,           // input_to_output_weights
3928           TFL_OperandIsNoneOrHasRank<5, 2>,   // recurrent_to_input_weights
3929           TFL_OperandHasRank<6, 2>,           // recurrent_to_forget_weights
3930           TFL_OperandHasRank<7, 2>,           // recurrent_to_cell_weights
3931           TFL_OperandHasRank<8, 2>,           // recurrent_to_output_weights
3932           TFL_OperandIsNoneOrHasRank<9, 1>,   // cell_to_input_weights
3933           TFL_OperandIsNoneOrHasRank<10, 1>,  // cell_to_forget_weights
3934           TFL_OperandIsNoneOrHasRank<11, 1>,  // cell_to_output_weights
3935           TFL_OperandIsNoneOrHasRank<12, 1>,  // input_gate_bias
3936           TFL_OperandHasRank<13, 1>,          // forget_gate_bias
3937           TFL_OperandHasRank<14, 1>,          // cell_gate_bias
3938           TFL_OperandHasRank<15, 1>,          // output_gate_bias
3939           TFL_OperandIsNoneOrHasRank<16, 2>,  // projection_weights
3940           TFL_OperandIsNoneOrHasRank<17, 1>,  // projection_bias
3941           TFL_StatefulOp]> {
3942  let summary = "Unidirectional sequence lstm operator";
3943
3944  let description = [{
3945    A recurrent neural network specified by an LSTM cell. This Op supports
3946    unrolling the input along the time or batch dimensions, and
3947    implements the following operation for
3948    each element in the sequence s = 1...sequence_length:
3949      outputs[s] = state = activation(LSTMOp(inputs[s]))
3950
    where LSTMOp is the LSTM TF Lite op and “activation” is the function
    passed as the “fused_activation_function” argument (if not “NONE”).
3953  }];
3954
3955  let arguments = (
3956    ins TFL_FpTensor:$input,
3957
3958    // Weights
3959    TFL_TensorOfOrNone<[F32, QI8]>:$input_to_input_weights,
3960    TFL_TensorOf<[F32, QI8]>:$input_to_forget_weights,
3961    TFL_TensorOf<[F32, QI8]>:$input_to_cell_weights,
3962    TFL_TensorOf<[F32, QI8]>:$input_to_output_weights,
3963
3964    // Recurrent weights
3965    TFL_TensorOfOrNone<[F32, QI8]>:$recurrent_to_input_weights,
3966    TFL_TensorOf<[F32, QI8]>:$recurrent_to_forget_weights,
3967    TFL_TensorOf<[F32, QI8]>:$recurrent_to_cell_weights,
3968    TFL_TensorOf<[F32, QI8]>:$recurrent_to_output_weights,
3969
3970    // Cell weights
3971    TFL_TensorOfOrNone<[F32, QI8]>:$cell_to_input_weights,
3972    // Optional input
3973    TFL_TensorOfOrNone<[F32, QI8]>:$cell_to_forget_weights,
3974    // Optional input
3975    TFL_TensorOfOrNone<[F32, QI8]>:$cell_to_output_weights,
3976
3977    // Bias
3978    TFL_TensorOfOrNone<[F32]>:$input_gate_bias,
3979    TFL_FpTensor:$forget_gate_bias,
3980    TFL_FpTensor:$cell_bias,
3981    TFL_FpTensor:$output_gate_bias,
3982
3983    // Projection weight and bias
3984    TFL_TensorOfOrNone<[F32, QI8]>:$projection_weights,
3985    // Optional input
3986    TFL_TensorOfOrNone<[F32]>:$projection_bias,
3987
3988    // Stateful activation and cell states.
3989    TFL_StatefulTensor:$input_activation_state,
3990    TFL_StatefulTensor:$input_cell_state,
3991
3992    // Layer norm coefficients
3993    TFL_TensorOfOrNone<[F32, QI8]>:$input_layer_norm_coefficients,
3994    TFL_TensorOfOrNone<[F32, QI8]>:$forget_layer_norm_coefficients,
3995    TFL_TensorOfOrNone<[F32, QI8]>:$cell_layer_norm_coefficients,
3996    TFL_TensorOfOrNone<[F32, QI8]>:$output_layer_norm_coefficients,
3997
3998    // Attributes
3999    TFL_AFAttr:$fused_activation_function,
4000    Confined<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$cell_clip,
4001    Confined<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$proj_clip,
4002    BoolAttr:$time_major,
4003
4004    // Types of the optional intermediate tensors, which exist for fully
4005    // quantized op and hold the ranges of the intermediate tensors.
    // The types of the intermediate tensors are quant.calibrated when imported
4007    // to only store calibrated min, max values. The proper quantization spec is
4008    // determined while going through quantization passes.
4009    OptionalAttr<TypeAttr>:$input_to_input_intermediate,
4010    OptionalAttr<TypeAttr>:$input_to_forget_intermediate,
4011    OptionalAttr<TypeAttr>:$input_to_cell_intermediate,
4012    OptionalAttr<TypeAttr>:$input_to_output_intermediate,
4013    OptionalAttr<TypeAttr>:$effective_hidden_scale_intermediate
4014  );
4015
4016  let results = (outs TFL_TensorOf<[F32, QI8]>:$output);
4017
4018  let hasOptions = 1;
4019
4020  let verifier = [{ return Verify(*this); }];
4021
4022  let extraClassDeclaration = [{
4023    // StatefulOpInterface:
4024    std::vector<int> GetStatefulOperands() { return {18, 19}; }
4025  }];
4026}
4027
4028def BidiLstmMandatoryInputsConstraint : PredOpTrait<
4029  "mandatory operands element types should match",
4030  // TODO(ashwinm): Replace the indices with input tensor names when that
4031  // support is available.
4032  Or<[
4033    TCopVTEtAreSameAt<[0, 2, 3, 4, 6, 7, 8, 13, 14, 15, 19, 20, 21, 23, 24, 25,
4034                       30, 31, 32, 35, 36, 37, 38]>,
4035    Neg<TypeIsPred<"input", F32>>]>>;
4036
4037// TODO(b/172517537): support quantized types
4038def BidiLstmOptionalPeepholeWeightConstraint : PredOpTrait<
4039  "the optional peephole weights should all be specified or none",
4040  TCopVTEtAreSameAt<[9, 10, 11, 26, 27, 28]>>;
4041
4042def BidiLstmProjectionWeightBiasConstraint : PredOpTrait<
4043  "either projection weight must be specified or both projection weight and "
4044  "projection bias must not be specified",
4045   Or<[
4046      And<[TypeIsPred<"fw_projection_weights", NoneType>,
4047           TypeIsPred<"fw_projection_bias", NoneType>,
4048           TypeIsPred<"bw_projection_weights", NoneType>,
4049           TypeIsPred<"bw_projection_bias", NoneType>]>,
4050      And<[
4051        Neg<TypeIsPred<"fw_projection_weights", NoneType>>,
4052        Neg<TypeIsPred<"bw_projection_weights", NoneType>>,
4053     ]>
4054   ]>>;
4055
4056// BidirectionalSequenceLstm op.
4057// TODO(ashwinm): Add constraint to validate the combination of operands
4058// that are valid for hybrid vs fully quantized vs float only semantics
4059def TFL_BidirectionalSequenceLSTMOp :
4060  TFL_Op<"bidirectional_sequence_lstm",
4061          [BidiLstmMandatoryInputsConstraint,
4062           BidiLstmOptionalPeepholeWeightConstraint,
4063           BidiLstmProjectionWeightBiasConstraint,
4064           LstmResultConstraint,
4065           TFL_OperandHasRank<0, 3>,   // input
4066           TFL_OperandHasRank<1, 2>,   // fw_input_to_input_weights
4067           TFL_OperandHasRank<2, 2>,   // fw_input_to_forget_weights
4068           TFL_OperandHasRank<3, 2>,   // fw_input_to_cell_weights
4069           TFL_OperandHasRank<4, 2>,   // fw_input_to_output_weights
4070           TFL_OperandHasRank<5, 2>,   // fw_recurrent_to_input_weights
4071           TFL_OperandHasRank<6, 2>,   // fw_recurrent_to_forget_weights
4072           TFL_OperandHasRank<7, 2>,   // fw_recurrent_to_cell_weights
4073           TFL_OperandHasRank<8, 2>,   // fw_recurrent_to_output_weights
4074           TFL_OperandHasRank<9, 1>,   // fw_cell_to_input_weights
4075           TFL_OperandHasRank<10, 1>,  // fw_cell_to_forget_weights
4076           TFL_OperandHasRank<11, 1>,  // fw_cell_to_output_weights
4077           TFL_OperandHasRank<12, 1>,  // fw_input_gate_bias
4078           TFL_OperandHasRank<13, 1>,  // fw_forget_gate_bias
4079           TFL_OperandHasRank<14, 1>,  // fw_cell_bias
4080           TFL_OperandHasRank<15, 1>,  // fw_output_gate_bias
4081           TFL_OperandHasRank<16, 2>,  // fw_projection_weights
4082           TFL_OperandHasRank<17, 1>,  // fw_projection_bias
4083           TFL_OperandHasRank<18, 2>,  // bw_input_to_input_weights
4084           TFL_OperandHasRank<19, 2>,  // bw_input_to_forget_weights
4085           TFL_OperandHasRank<20, 2>,  // bw_input_to_cell_weights
4086           TFL_OperandHasRank<21, 2>,  // bw_input_to_output_weights
4087           TFL_OperandHasRank<22, 2>,  // bw_recurrent_to_input_weights
4088           TFL_OperandHasRank<23, 2>,  // bw_recurrent_to_forget_weights
4089           TFL_OperandHasRank<24, 2>,  // bw_recurrent_to_cell_weights
4090           TFL_OperandHasRank<25, 2>,  // bw_recurrent_to_output_weights
4091           TFL_OperandHasRank<26, 1>,  // bw_cell_to_input_weights
4092           TFL_OperandHasRank<27, 1>,  // bw_cell_to_forget_weights
4093           TFL_OperandHasRank<28, 1>,  // bw_cell_to_output_weights
4094           TFL_OperandHasRank<29, 1>,  // bw_input_gate_bias
4095           TFL_OperandHasRank<30, 1>,  // bw_forget_gate_bias
4096           TFL_OperandHasRank<31, 1>,  // bw_cell_bias
4097           TFL_OperandHasRank<32, 1>,  // bw_output_gate_bias
4098           TFL_OperandHasRank<33, 2>,  // bw_projection_weights
4099           TFL_OperandHasRank<34, 1>,  // bw_projection_bias
4100           TFL_StatefulOp]> {
4101  let summary = "Bidirectional sequence lstm operator";
4102
4103  let description = [{
    Bidirectional LSTM is essentially two LSTMs, one running forward and the
    other running backward, and the output is the concatenation of the two
    LSTMs' outputs.
4107  }];
4108
4109  let arguments = (
4110    ins TFL_TensorOf<[F32, I8]>:$input,
4111
4112    // Forward LSTM Weights
4113    TFL_TensorOfOrNone<[F32, I8]>:$fw_input_to_input_weights,
4114    TFL_TensorOf<[F32, I8]>:$fw_input_to_forget_weights,
4115    TFL_TensorOf<[F32, I8]>:$fw_input_to_cell_weights,
4116    TFL_TensorOf<[F32, I8]>:$fw_input_to_output_weights,
4117
4118    // Forward Recurrent weights
4119    TFL_TensorOfOrNone<[F32, I8]>:$fw_recurrent_to_input_weights,
4120    TFL_TensorOf<[F32, I8]>:$fw_recurrent_to_forget_weights,
4121    TFL_TensorOf<[F32, I8]>:$fw_recurrent_to_cell_weights,
4122    TFL_TensorOf<[F32, I8]>:$fw_recurrent_to_output_weights,
4123
4124    // Forward Cell weights
4125    TFL_TensorOfOrNone<[F32, I8]>:$fw_cell_to_input_weights,
4126    // Optional Forward cell weights
4127    TFL_TensorOfOrNone<[F32, I8]>:$fw_cell_to_forget_weights,
4128    // Optional Forward cell weights
4129    TFL_TensorOfOrNone<[F32, I8]>:$fw_cell_to_output_weights,
4130
4131    // Forward Bias
4132    TFL_TensorOfOrNone<[F32]>:$fw_input_gate_bias,
4133    TFL_TensorOf<[F32]>:$fw_forget_gate_bias,
4134    TFL_TensorOf<[F32]>:$fw_cell_bias,
4135    TFL_TensorOf<[F32]>:$fw_output_gate_bias,
4136
4137    // Forward Projection weight and bias
4138    TFL_TensorOfOrNone<[F32, I8]>:$fw_projection_weights,
4139    // Forward Optional input
4140    TFL_TensorOfOrNone<[F32]>:$fw_projection_bias,
4141
4142    // Backward LSTM Weights
4143    TFL_TensorOfOrNone<[F32, I8]>:$bw_input_to_input_weights,
4144    TFL_TensorOf<[F32, I8]>:$bw_input_to_forget_weights,
4145    TFL_TensorOf<[F32, I8]>:$bw_input_to_cell_weights,
4146    TFL_TensorOf<[F32, I8]>:$bw_input_to_output_weights,
4147
4148    // Backward Recurrent weights
4149    TFL_TensorOfOrNone<[F32, I8]>:$bw_recurrent_to_input_weights,
4150    TFL_TensorOf<[F32, I8]>:$bw_recurrent_to_forget_weights,
4151    TFL_TensorOf<[F32, I8]>:$bw_recurrent_to_cell_weights,
4152    TFL_TensorOf<[F32, I8]>:$bw_recurrent_to_output_weights,
4153
4154    // Backward Cell weights
4155    TFL_TensorOfOrNone<[F32, I8]>:$bw_cell_to_input_weights,
4156    // Optional Forward cell weights
4157    TFL_TensorOfOrNone<[F32, I8]>:$bw_cell_to_forget_weights,
4158    // Optional Forward cell weights
4159    TFL_TensorOfOrNone<[F32, I8]>:$bw_cell_to_output_weights,
4160
4161    // Backward Bias
4162    TFL_TensorOfOrNone<[F32]>:$bw_input_gate_bias,
4163    TFL_TensorOf<[F32]>:$bw_forget_gate_bias,
4164    TFL_TensorOf<[F32]>:$bw_cell_bias,
4165    TFL_TensorOf<[F32]>:$bw_output_gate_bias,
4166
4167    // Backward Projection weight and bias
4168    TFL_TensorOfOrNone<[F32, I8]>:$bw_projection_weights,
4169    // Backward Optional input
4170    TFL_TensorOfOrNone<[F32]>:$bw_projection_bias,
4171
4172    // Stateful activation and cell states.
4173    TFL_StatefulTensor:$fw_input_activation_state,
4174    TFL_StatefulTensor:$fw_input_cell_state,
4175    TFL_StatefulTensor:$bw_input_activation_state,
4176    TFL_StatefulTensor:$bw_input_cell_state,
4177
4178    // Auxiliary input & weights.
4179    TFL_TensorOfOrNone<[F32, I8]>:$aux_input,
4180    // Auxiliary fw weights.
4181    TFL_TensorOfOrNone<[F32, I8]>:$fw_aux_input_to_input_weights,
4182    TFL_TensorOfOrNone<[F32, I8]>:$fw_aux_input_to_forget_weights,
4183    TFL_TensorOfOrNone<[F32, I8]>:$fw_aux_input_to_cell_weights,
4184    TFL_TensorOfOrNone<[F32, I8]>:$fw_aux_input_to_output_weights,
4185    // Auxiliary bw weights.
4186    TFL_TensorOfOrNone<[F32, I8]>:$bw_aux_input_to_input_weights,
4187    TFL_TensorOfOrNone<[F32, I8]>:$bw_aux_input_to_forget_weights,
4188    TFL_TensorOfOrNone<[F32, I8]>:$bw_aux_input_to_cell_weights,
4189    TFL_TensorOfOrNone<[F32, I8]>:$bw_aux_input_to_output_weights,
4190
4191    // Attributes
4192    TFL_AFAttr:$fused_activation_function,
4193    Confined<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$cell_clip,
4194    Confined<DefaultValuedAttr<F32Attr, "0.0f">, [TFL_FloatNonNegative]>:$proj_clip,
4195    BoolAttr:$merge_outputs,
4196    BoolAttr:$time_major
4197  );
4198
4199  let results = (outs
4200    AnyTensor:$fw_output,
4201    AnyTensor:$bw_output
4202  );
4203
4204  let hasOptions = 1;
4205
4206  let verifier = [{ return Verify(*this); }];
4207
4208  let extraClassDeclaration = [{
4209    // StatefulOpInterface:
4210    std::vector<int> GetStatefulOperands() { return {35, 36, 37, 38}; }
4211  }];
4212}
4213
4214// UnidirectionalSequenceRNN op.
4215def TFL_UnidirectionalSequenceRNNOp : TFL_Op<"unidirectional_sequence_rnn", [
4216    TFL_OperandHasRank<4, 2>,
4217    PredOpTrait<"input and output must have same element type",
4218      TFL_TCresVTEtIsSameAsOp<0, 0>>,
4219    PredOpTrait<"input and constant value operands must have same element type",
4220      TFL_TCopVTEtAreSameAt<1, 2>>,
4221    TFL_StatefulOp]> {
4222  let summary = "Unidirectional sequence rnn operator";
4223
4224  let description = [{
4225    A recurrent neural network specified by an RNN cell. This Op takes in input
    in a format {batch_size, seq_len, input_size} or
    {seq_len, batch_size, input_size} if it's time-major.
4228
4229    It implements the following operation for
4230    each element in the sequence s = 1...sequence_length:
4231      outputs[s] = state = activation(RNNOp(inputs[s]))
4232
    where RNNOp is the RNN TF Lite op and “activation” is the function passed
    as the “fused_activation_function” argument (if not “NONE”).
4235  }];
4236
4237  let arguments = (
4238    ins TFL_FpTensor:$input,
4239
4240    // Weights
4241    TFL_TensorOf<[F32, QI8]>:$input_to_input_weights,
4242
4243    // Recurrent weights
4244    TFL_TensorOf<[F32, QI8]>:$recurrent_to_input_weights,
4245
4246    // Bias
4247    TFL_FpTensor:$input_gate_bias,
4248
4249    // Hidden state.
4250    TFL_StatefulTensor:$hidden_state,
4251
4252    // Attributes
4253    BoolAttr:$time_major,
4254    TFL_AFAttr:$fused_activation_function
4255  );
4256
4257  let results = (outs TFL_FpTensor:$output);
4258
4259  let hasOptions = 1;
4260
4261  let customOption = "SequenceRNNOptions";
4262
4263  let verifier = [{ return Verify(*this); }];
4264
4265  let extraClassDeclaration = [{
4266    // StatefulOpInterface:
4267    std::vector<int> GetStatefulOperands() { return {4}; }
4268  }];
4269}
4270
4271def TFL_WhereOp : TFL_Op<"where", [NoSideEffect]> {
4272  let summary = "Returns locations of nonzero / true values in a tensor.";
4273
4274  let description = [{
4275This operation returns the coordinates of true elements in `condition`. The
4276coordinates are returned in a 2-D tensor where the first dimension (rows)
4277represents the number of true elements, and the second dimension (columns)
4278represents the coordinates of the true elements. Keep in mind, the shape of
4279the output tensor can vary depending on how many true values there are in
4280`condition`. Indices are output in row-major order.
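
For example, with a hypothetical `condition` of [[true, false], [false, true]],
each of the two true elements produces one coordinate row:

```
%index = "tfl.where"(%condition) : (tensor<2x2xi1>) -> tensor<?x2xi64>
// index ==> [[0, 0], [1, 1]]
```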
4281  }];
4282
4283  let arguments = (ins
4284    TFL_BoolTensor:$input
4285  );
4286
4287  let results = (outs
4288    TFL_I64Tensor:$index
4289  );
4290}
4291
4292def TFL_NumericVerifyOp : Op<TFL_Dialect, "NumericVerify", [
4293    SameOperandsShape]> {
4294
  let summary = "Verifies the numerics of the two operands";
4296
4297  let description = [{
    The NumericVerify op is a debugging op to verify the numerics of the two
    activations. It is a custom op in TFLite.
    If log_if_failed is true, the NumericVerify op calculates statistics on
    differences between float and quantized activations, outputs logs, sets
    the differences to the output tensors, and throws an error if errors above
    the tolerance exist. If log_if_failed = false, it does not raise an error.
4305  }];
4306
4307  let arguments = (ins
4308    TFL_TensorOf<[QI8, QUI8, QI16, F16, TFL_Quint8]>:$input,
4309    TFL_TensorOf<[F32]>:$ref,
4310
4311    // Attributes
4312    DefaultValuedAttr<F32Attr, "0.1">:$tolerance,
4313    DefaultValuedAttr<BoolAttr, "false">:$log_if_failed
4314  );
4315
4316  let results = (outs TFL_FpTensor:$output);
4317}
4318
4319// SVDF op.
4320def TFL_SVDFOp :
4321  TFL_Op<"svdf", [
4322    PredOpTrait<"the input and result tensor elemental types must be same",
4323      TFL_TCresVTEtIsSameAsOp<0, 0>>,
4324    TFL_StatefulOp,
4325    AccumulatorUniformScale<3, 2, 4>]> {
4326
  let summary = "Singular value decomposition filter operator";
4328
4329  let description = [{
4330    The SVDF op is a decomposition of a densely connected op into low rank
4331    filters.
4332    For details: https://research.google.com/pubs/pub43813.html
4333                 https://arxiv.org/abs/1812.02802
4334  }];
4335
4336  let arguments = (
4337    ins TFL_TensorOf<[F32, QI8]>:$input,
4338
4339    // Feature Weights.
4340    TFL_TensorOf<[F32, QI8, QUI8]>:$feature_weights,
4341
4342    // Time weights
4343    TFL_TensorOf<[F32, QI16]>:$time_weights,
4344
4345    // Bias
4346    TFL_TensorOfOrNone<[F32, QI32]>:$input_gate_bias,
4347
4348    // Activation state.
4349    TFL_StatefulTensor:$activation_state,
4350
4351    // Attributes
4352    Confined<I32Attr, [IntPositive]>:$rank,
4353    TFL_AFAttr:$fused_activation_function
4354  );
4355
4356  let results = (outs TFL_TensorOf<[F32, QI8]>:$output);
4357
4358  let hasOptions = 1;
4359
4360  let verifier = [{ return Verify(*this); }];
4361
4362  let extraClassDeclaration = [{
4363    // StatefulOpInterface:
4364    std::vector<int> GetStatefulOperands() { return {4}; }
4365  }];
4366}
4367
4368def TFL_SegmentSumOp: TFL_Op<"segment_sum", [
4369    NoSideEffect,
4370    PredOpTrait<"input and output must have same element type",
4371      TFL_TCresVTEtIsSameAsOp<0, 0>>,
4372    NoQuantizableResult]> {
4373  let summary = "SegmentSum operator";
4374
4375  let description = [{
4376    Computes the sum along segments of a tensor.
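
    For example, with a hypothetical `input` of [[1, 2, 3, 4], [5, 6, 7, 8],
    [4, 3, 2, 1]] and `segment_ids` of [0, 0, 1], rows with the same segment
    id are summed:

    ```
    output ==> [[6, 8, 10, 12], [4, 3, 2, 1]]
    ```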
4377  }];
4378
4379  let arguments = (ins
4380    TFL_TensorOf<[F32, I32]>:$input,
4381    TFL_I32Tensor:$segment_ids
4382  );
4383  let results = (outs TFL_TensorOf<[F32, I32]>:$output);
4384}
4385
4386def TFL_YieldOp : Op<TFL_Dialect, "yield", [Terminator]> {
4387  let summary = "Yield operation";
4388  let description = [{
    The "yield" operation represents a return operation within the conditional
    and body of structured control flow (e.g., while). The operation takes a
    variable number of operands and produces no results. The operand number and
    types must match the signature of the region that contains the operation.
4393  }];
4394
4395  let arguments = (ins Variadic<AnyType>:$operands);
4396}
4397
4398def TFL_WhileOp : Op<TFL_Dialect, "while", [
4399    DeclareOpInterfaceMethods<LoopLikeOpInterface>,
4400    SingleBlockImplicitTerminator<"YieldOp">]> {
4401  let summary = [{While loop}];
4402
4403  let description = [{
4404    output = input; while (cond(output)) { output = body(output) }
4405
    While loop where all values are passed through as arguments with implicit
    capture.
4408
4409    input: A list of input tensors whose types are T.
4410    output: A list of output tensors whose types are T.
4411    cond: A region that takes 'input' and returns a boolean scalar tensor.
4412    body: A region that takes a list of tensors and returns another
4413          list of tensors. Both lists have the same types.
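
    A minimal sketch of the textual form (illustrative only; the constants,
    types, and the use of tfl.less / tfl.add are hypothetical choices):

    ```
    %result = "tfl.while"(%init) ({
    ^bb0(%arg: tensor<i32>):
      %limit = "tfl.pseudo_const"() {value = dense<10> : tensor<i32>} : () -> tensor<i32>
      %cond = "tfl.less"(%arg, %limit) : (tensor<i32>, tensor<i32>) -> tensor<i1>
      "tfl.yield"(%cond) : (tensor<i1>) -> ()
    }, {
    ^bb0(%arg: tensor<i32>):
      %one = "tfl.pseudo_const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
      %next = "tfl.add"(%arg, %one) {fused_activation_function = "NONE"}
          : (tensor<i32>, tensor<i32>) -> tensor<i32>
      "tfl.yield"(%next) : (tensor<i32>) -> ()
    }) {is_stateless = false} : (tensor<i32>) -> tensor<i32>
    ```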
4414  }];
4415
4416  let arguments = (ins
4417    Variadic<AnyTensor>:$input,
4418
4419    // Used to map StatelessWhile and While op defined in TensorFlow to a common
4420    // op.
4421    DefaultValuedAttr<BoolAttr, "false">:$is_stateless
4422  );
4423  let results = (outs Variadic<AnyTensor>:$output);
4424
4425  let regions = (region SizedRegion<1>:$cond, SizedRegion<1>:$body);
4426
4427  let verifier = [{ return Verify(*this); }];
4428
4429  let hasCanonicalizer = 1;
4430}
4431
4432def TFL_CallOnceOp : TFL_Op<"call_once", []> {
4433  let summary = "Invokes an initialization function";
4434
4435  let description = [{
This operation invokes the given initialization function for the session
initializer in the tf_saved_model dialect.
4438  }];
4439
4440  let arguments = (ins
4441    StrAttr:$session_init_function
4442  );
4443
4444  let results = (outs);
4445}
4446
4447def TFL_CustomOp : Op<TFL_Dialect, "custom", [
4448  NoSideEffect, NoQuantizableResult]> {
4449  let summary = "Custom op";
4450
4451  let description = [{
4452    A generic op for any TFLite custom operation.
4453
4454    input: A list of inputs in the original op.
    custom_code: A string used to identify exactly which op this is, which
                 corresponds to operator_codes.custom_code in the flatbuffer.
    custom_option: a holder to save the op attributes as bytes.
4458    output: A list of outputs in the original op.
4459  }];
4460
4461  let arguments = (ins
4462    Variadic<TFL_TensorOfOrNone<[AnyType]>>:$input,
4463    StrAttr:$custom_code,
4464    OpaqueBytesAttr:$custom_option
4465  );
4466  let results = (outs Variadic<AnyTensor>:$output);
4467
4468  let verifier = [{ return Verify(*this); }];
4469}
4470
4471def TFL_CustomTfOp : Op<TFL_Dialect, "custom_tf", [
4472  // Currently the custom ops have no side effect
4473  // TODO(karimnosseir): Revisit if this needs updating.
4474  NoSideEffect,
4475  NoQuantizableResult,
4476  SingleBlockImplicitTerminator<"YieldOp">]> {
4477  let summary = "Wrapper Op for TF custom ops.";
4478
4479  let description = [{
    A wrapper op around any custom TF op. These include ops defined using
    custom_opdefs or linked ops that are not defined in the TF dialect.
    This op just wraps the custom op inside a region.
    Note #1: this op will not include TF Lite custom ops defined using CustomOp.
    Note #2: this op is just an internal representation inside the converter
    and is not exposed/exported when the model is exported to Flatbuffer.
4486  }];
4487
4488  let arguments = (ins
4489    Variadic<TFL_TensorOfOrNone<[AnyType]>>:$input
4490  );
4491  let results = (outs Variadic<AnyTensor>:$output);
4492
4493  let regions = (region SizedRegion<1>:$body);
4494}
4495
4496def TFL_BroadcastToOp : TFL_Op<"broadcast_to", [
4497    PredOpTrait<"input and output must have same element type",
4498      TFL_TCresVTEtIsSameAsOp<0, 0>>,
4499    TFL_OperandHasRankAtMost<0, 8>,
4500    TFL_OperandHasRank<1, 1>,
4501    PredOpTrait<"output dimension count must be at most 8",
4502      Or<[TFL_OperandIsUnrankedPred<1>,
4503          TFL_OperandDimIsAtMost<1, 0, 8>]>>,
4504    NoSideEffect]> {
4505  let summary = "Broadcast an array for a compatible shape.";
4506
4507  let description = [{
Broadcasting is the process of making arrays have compatible shapes
for arithmetic operations. Two shapes are compatible if for each
4510dimension pair they are either equal or one of them is one. When trying
4511to broadcast a Tensor to a shape, it starts with the trailing dimensions,
4512and works its way forward.
4513
4514For example,
4515
4516>>> x = tf.constant([1, 2, 3])
4517>>> y = tf.broadcast_to(x, [3, 3])
4518>>> print(y)
4519tf.Tensor(
4520    [[1 2 3]
4521     [1 2 3]
4522     [1 2 3]], shape=(3, 3), dtype=int32)
4523
In the above example, the input Tensor with shape `[1, 3]` is broadcast
to an output Tensor with shape `[3, 3]`.
4526
4527When doing broadcasted operations such as multiplying a tensor
4528by a scalar, broadcasting (usually) confers some time or space
4529benefit, as the broadcasted tensor is never materialized.
4530
4531However, `broadcast_to` does not carry with it any such benefits.
4532The newly-created tensor takes the full memory of the broadcasted
shape. (In a graph context, `broadcast_to` might be fused into a
subsequent operation and then be optimized away, however.)
4535  }];
4536
4537  let arguments = (ins
4538    TFL_TensorOf<[F32, I32, I1, I8, QI8, UI8, QUI8, I16, QI16, I64, Complex<F<32>>]>:$input,
4539    TFL_I32OrI64Tensor:$shape
4540  );
4541
4542  let results = (outs
4543    TFL_TensorOf<[F32, I32, I1, I8, QI8, UI8, QUI8, I16, QI16, I64, Complex<F<32>>]>:$output
4544  );
4545}
4546
4547def TFL_RFFT2dOp : TFL_Op<"RFFT2D", [NoSideEffect, NoQuantizableResult]> {
4548  let summary = "2D real-valued fast Fourier transform.";
4549
4550  let description = [{
4551Computes the 2-dimensional discrete Fourier transform of a real-valued signal
4552over the inner-most 2 dimensions of `input`.
4553
4554Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
4555`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
4556of `output`: the zero-frequency term, followed by the `fft_length / 2`
4557positive-frequency terms.
4558
4559Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
4560corresponding dimension of `input`, the dimension is cropped. If it is larger,
4561the dimension is padded with zeros.
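
For example (an illustrative MLIR snippet), with a hypothetical `fft_length`
of [16, 16] the inner-most result dimension is 16 / 2 + 1 = 9:

```
%output = "tfl.RFFT2D"(%input, %fft_length)
    : (tensor<1x16x16xf32>, tensor<2xi32>) -> tensor<1x16x9xcomplex<f32>>
```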
4562  }];
4563
4564  let arguments = (ins
4565    TFL_FpTensor:$input,
4566    TFL_I32Tensor:$fft_length
4567  );
4568
4569  let results = (outs
4570    TFL_Complex64Tensor:$output
4571  );
4572}
4573
4574def TFL_AssignVariableOp : TFL_Op<"assign_variable", []> {
4575  let summary = "Assigns a new value to a variable.";
4576
4577  let description = [{
4578Any ReadVariableOp with a control dependency on this op is guaranteed to return
4579this value or a subsequent newer value of the variable.
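
A minimal usage sketch pairing this op with read_variable (illustrative; the
resource id and value types are hypothetical):

```
"tfl.assign_variable"(%resource_id, %value) : (tensor<i32>, tensor<4xf32>) -> ()
%v = "tfl.read_variable"(%resource_id) : (tensor<i32>) -> tensor<4xf32>
```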
4580  }];
4581
4582  let arguments = (ins
4583    // TODO(b/149099381): Remove integer IDs after adding the new variable
4584    // handle type.
4585    TFL_TensorOf<[I32]>:$resource_id,
4586    // TODO(b/149099381): Support other types too.
4587    TFL_TensorOf<[F32]>:$value
4588  );
4589
4590  let results = (outs);
4591}
4592
4593def TFL_ReadVariableOp : TFL_Op<"read_variable", []> {
4594  let summary = "Reads variable value.";
4595
4596  let description = [{
4597Read variable data identified by 'resource_id'.
4598  }];
4599
4600  let arguments = (ins
4601    // TODO(b/149099381): Remove integer IDs after adding the new variable
4602    // handle type.
4603    TFL_TensorOf<[I32]>:$resource_id
4604  );
4605
4606  // TODO(b/149099381): Support other types too.
4607  let results = (outs TFL_TensorOf<[F32]>:$result);
4608}
4609
4610def TFL_Conv3DOp : TFL_Op<"conv_3d", [
4611    NoSideEffect,
4612    AccumulatorUniformScale<2, 0, 1>,
4613    TFL_OperandHasRank<0, 5>,
4614    TFL_OperandHasRank<1, 5>,
4615    // Channel dimension in input and filter should match.
4616    TFL_OperandsHaveSameDimsTrait<0, 1, 4, 3>,
4617    PredOpTrait<"input and output must have same element type",
4618      TFL_TCresVTEtIsSameAsOp<0, 0>>,
4619    PredOpTrait<"bias and output must have same element type",
4620      Or<[
4621        TFL_OperandIsNoneType<2>,
4622        TFL_TCresVTEtIsSameAsOp<0, 2>]>>,
    PredOpTrait<"bias must have a number of elements equal to the 4th dim of filter",
4624      Or<[
4625        TFL_OperandIsNoneType<2>,
4626        TFL_NumElementsEqualsDim<2, 1, 4>]>>]> {
4627  let summary = "Convolution 3D operator";
4628
4629  let description = [{
4630    Performs convolution operation on 3D inputs.
4631    Inputs:
4632      `inputs[0]`: required: the input activation tensor
4633      `inputs[1]`: required: the filter weight tensor
4634      `inputs[2]`: optional: the bias tensor
4635  }];
4636
4637  let arguments = (ins
4638    TFL_TensorOf<[F32]>:$input,
4639    TFL_TensorOf<[F32]>:$filter,
4640    TFL_TensorOfOrNone<[F32]>:$bias,
4641    I32Attr:$dilation_d_factor,
4642    I32Attr:$dilation_h_factor,
4643    I32Attr:$dilation_w_factor,
4644    TFL_AFAttr:$fused_activation_function,
4645    TFL_PaddingAttr:$padding,
4646    I32Attr:$stride_d,
4647    I32Attr:$stride_h,
4648    I32Attr:$stride_w
4649  );
4650
4651  let results = (outs TFL_TensorOf<[F32]>:$output);
4652
4653  let hasOptions = 1;
4654
4655  let customOption = "Conv3DOptions";
4656}
4657
4658def TFL_ComplexAbsOp : TFL_Op<"complex_abs", [
4659  NoSideEffect,
4660  SameOperandsAndResultShape]> {
4661  let summary = "Computes the complex absolute value of a tensor.";
4662
4663  let description = [{
4664Given a tensor `x` of complex numbers, this operation returns a tensor of type
4665`float` or `double` that is the absolute value of each element in `x`. All
4666elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
4667value is computed as \\( \sqrt{a^2 + b^2}\\).
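
For example, for a hypothetical input [3.0 + 4.0j, 0.0 + 1.0j] the output is
[5.0, 1.0]:

```
%output = "tfl.complex_abs"(%input) : (tensor<2xcomplex<f32>>) -> tensor<2xf32>
```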
4668  }];
4669
4670  let arguments = (ins
4671    TFL_TensorOf<[Complex<F<32>>, Complex<F<64>>]>:$input
4672  );
4673
4674  let results = (outs
4675    TFL_TensorOf<[F32, F64]>:$output
4676  );
4677}
4678
4679def TFL_RealOp : TFL_Op<"real", [
4680  NoSideEffect,
4681  SameOperandsAndResultShape]> {
4682  let summary = "Returns the real part of a complex number.";
4683
4684  let description = [{
4685Given a tensor `input` of complex numbers, this operation returns a tensor of
4686type `float` that is the real part of each element in `input`. All elements in
4687`input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
4688 part returned by this operation and *b* is the imaginary part.
4689  }];
4690
4691  let arguments = (ins
4692    TFL_TensorOf<[Complex<F<32>>, Complex<F<64>>]>:$input
4693  );
4694
4695  let results = (outs
4696    TFL_TensorOf<[F32, F64]>:$output
4697  );
4698}
4699
4700def TFL_ImagOp : TFL_Op<"imag", [
4701  NoSideEffect,
4702  SameOperandsAndResultShape]> {
4703  let summary = "Returns the imaginary part of a complex number.";
4704
4705  let description = [{
4706Given a tensor `input` of complex numbers, this operation returns a tensor of
4707type `float` that is the imaginary part of each element in `input`. All
4708elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
4709is the real part and *b* is the imaginary part returned by this operation.
4710  }];
4711
4712  let arguments = (ins
4713    TFL_TensorOf<[Complex<F<32>>, Complex<F<64>>]>:$input
4714  );
4715
4716  let results = (outs
4717    TFL_TensorOf<[F32, F64]>:$output
4718  );
4719}
4720
4721#endif // TFL_OPS
4722