/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This is the auto-generated operation definition file for TensorFlow.
//
// PLEASE DO NOT MANUALLY EDIT THIS FILE!
//
// If you absolutely need to modify the generated fields of an op, move the op
// definition to `tf_ops.td` and perform the modification there.
//
// This file contains TensorFlow ops whose definitions are programmatically
// generated from the TF op registration and the api-def-files in the following
// folder:
// tensorflow/core/api_def/base_api
// The generated fields for an op include name, summary, description, traits,
// arguments, results, derived attributes. Therefore, modifications to these
// fields will NOT be respected upon subsequent refreshes. However, additional
// fields after those fields will be retained.
//
// Ops in this file are sorted alphabetically.

include "tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td"
include "mlir/Interfaces/CallInterfaces.td"
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/IR/OpAsmInterface.td"

def TF_AbsOp : TF_Op<"Abs", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes the absolute value of a tensor.";

  let description = [{
Given a tensor `x`, this operation returns a tensor containing the absolute
value of each element in `x`. For example, if x is an input element and y is
an output element, this operation computes \\(y = |x|\\).
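
For example (an illustrative sketch; `tf.abs` dispatches to this op for real dtypes):

```python
x = tf.constant([-2.25, 3.0, -1.0])
tf.abs(x)  # [2.25, 3.0, 1.0]
```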
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AcosOp : TF_Op<"Acos", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes acos of x element-wise.";

  let description = [{
Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each element of the tensor. If `y = tf.math.cos(x)`, then `x = tf.math.acos(y)`.

  Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
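
For example (illustrative; outputs rounded):

```python
x = tf.constant([1.0, -0.5])
tf.math.acos(x)  # ~[0., 2.0943952], i.e. [0, 2*pi/3]
```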
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AcoshOp : TF_Op<"Acosh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes inverse hyperbolic cosine of x element-wise.";

  let description = [{
Given an input tensor, the function computes inverse hyperbolic cosine of every element.
Input range is `[1, inf]`. It returns `nan` if the input lies outside the range.

```python
x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")])
tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]
```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AddOp : TF_Op<"Add", [NoSideEffect, ResultsBroadcastableShape, TF_LayoutAgnostic, TF_SameOperandsAndResultElementTypeResolveRef]>,
               WithBroadcastableBinOpBuilder {
  let summary = "Returns x + y element-wise.";

  let description = [{
*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

Given two input tensors, the `tf.add` operation computes the element-wise sum of the two tensors.

Both input and output have a range `(-inf, inf)`.
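
For example (a minimal sketch of the broadcasting behavior):

```python
x = tf.constant([1, 2, 3])
y = tf.constant(10)  # a scalar broadcasts against x
tf.math.add(x, y)    # [11, 12, 13]
```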
  }];

  let arguments = (ins
    TF_NumberNotQuantizedOrStrTensor:$x,
    TF_NumberNotQuantizedOrStrTensor:$y
  );

  let results = (outs
    TF_NumberNotQuantizedOrStrTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_AddNOp : TF_Op<"AddN", [Commutative, NoSideEffect]> {
  let summary = "Add all input tensors element-wise.";

  let description = [{
Inputs must be of the same size and shape.

  ```python
  x = [9, 7, 10]
  tf.math.add_n(x) ==> 26
  ```
  }];

  let arguments = (ins
    Variadic<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8, TF_Variant]>>:$inputs
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8, TF_Variant]>:$sum
  );

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasFolder = 1;
}

def TF_AddV2Op : TF_Op<"AddV2", [Commutative, NoSideEffect, ResultsBroadcastableShape, TF_CwiseBinary, TF_LayoutAgnostic, TF_SameOperandsAndResultElementTypeResolveRef]>,
                 WithBroadcastableBinOpBuilder {
  let summary = "Returns x + y element-wise.";

  let description = [{
*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;

  let hasFolder = 1;
}

def TF_AdjustContrastv2Op : TF_Op<"AdjustContrastv2", [NoSideEffect]> {
  let summary = "Adjust the contrast of one or more images.";

  let description = [{
`images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
interpreted as `[height, width, channels]`.  The other dimensions only
represent a collection of images, such as `[batch, height, width, channels]`.

Contrast is adjusted independently for each channel of each image.

For each channel, the Op first computes the mean of the image pixels in the
channel and then adjusts each component of each pixel to
`(x - mean) * contrast_factor + mean`.
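
For example (a worked sketch of the formula above on a single-channel 2x2 image):

```python
images = tf.constant([[[[1.0], [3.0]], [[5.0], [7.0]]]])  # mean = 4.0
tf.image.adjust_contrast(images, contrast_factor=2.0)
# each pixel -> (x - 4.0) * 2.0 + 4.0: [[[[-2.], [2.]], [[6.], [10.]]]]
```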
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
    Arg<TF_Float32Tensor, [{A float multiplier for adjusting contrast.}]>:$contrast_factor
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The contrast-adjusted image or images.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AdjustHueOp : TF_Op<"AdjustHue", [NoSideEffect]> {
  let summary = "Adjust the hue of one or more images.";

  let description = [{
`images` is a tensor of at least 3 dimensions.  The last dimension is
interpreted as channels, and must be three.

The input image is considered in the RGB colorspace. Conceptually, the RGB
colors are first mapped into HSV. A delta is then applied to all the hue values,
and the result is then remapped back to RGB colorspace.
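
For example (illustrative; a delta of 1/3 rotates hue by 120 degrees):

```python
image = tf.constant([[[1.0, 0.0, 0.0]]])     # one pure-red pixel
tf.image.adjust_hue(image, delta=1.0 / 3.0)  # ~[[[0., 1., 0.]]] (red -> green)
```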
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
    Arg<TF_Float32Tensor, [{A float delta to add to the hue.}]>:$delta
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The hue-adjusted image or images.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AdjustSaturationOp : TF_Op<"AdjustSaturation", [NoSideEffect]> {
  let summary = "Adjust the saturation of one or more images.";

  let description = [{
`images` is a tensor of at least 3 dimensions.  The last dimension is
interpreted as channels, and must be three.

The input image is considered in the RGB colorspace. Conceptually, the RGB
colors are first mapped into HSV. A scale is then applied to all the saturation
values, and the result is then remapped back to RGB colorspace.
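
For example (illustrative; a scale of 0 removes all saturation):

```python
image = tf.constant([[[1.0, 0.0, 0.0]]])  # one pure-red pixel
tf.image.adjust_saturation(image, 0.0)    # [[[1., 1., 1.]]]: only the value V remains
```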
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
    Arg<TF_Float32Tensor, [{A float scale by which to multiply the saturation.}]>:$scale
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The saturation-adjusted image or images.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AllOp : TF_Op<"All", [NoSideEffect]> {
  let summary = [{
Computes the "logical and" of elements across dimensions of a tensor.
  }];

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
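
For example (a minimal sketch via `tf.reduce_all`, which lowers to this op):

```python
x = tf.constant([[True, True], [False, True]])
tf.reduce_all(x, axis=1)                 # [True, False]
tf.reduce_all(x, axis=1, keepdims=True)  # [[True], [False]], rank is preserved
```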
  }];

  let arguments = (ins
    Arg<TF_BoolTensor, [{The tensor to reduce.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.}]>:$reduction_indices,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    Res<TF_BoolTensor, [{The reduced tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{ return Verify(*this); }];
}

def TF_AllToAllOp : TF_Op<"AllToAll", [NoSideEffect, TF_NoConstantFold]> {
  let summary = "An Op to exchange data across TPU replicas.";

  let description = [{
On each replica, the input is split into `split_count` blocks along
`split_dimension` and sent to the other replicas given `group_assignment`. After
receiving `split_count` - 1 blocks from other replicas, we concatenate the
blocks along `concat_dimension` as the output.

For example, suppose there are 2 TPU replicas:
replica 0 receives input: `[[A, B]]`
replica 1 receives input: `[[C, D]]`

group_assignment=`[[0, 1]]`
concat_dimension=0
split_dimension=1
split_count=2

replica 0's output: `[[A], [C]]`
replica 1's output: `[[B], [D]]`
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The local input to the sum.}]>:$input,
    Arg<TF_Int32Tensor, [{An int32 tensor with shape
[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
replica ids in the ith subgroup.}]>:$group_assignment,

    I64Attr:$concat_dimension,
    I64Attr:$split_dimension,
    I64Attr:$split_count
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The exchanged result.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AngleOp : TF_Op<"Angle", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Returns the argument of a complex number.";

  let description = [{
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` that is the argument of each element in `input`. All elements in
`input` must be complex numbers of the form \\(a + bj\\), where *a*
is the real part and *b* is the imaginary part.

The argument returned by this operation is of the form \\(atan2(b, a)\\).

For example:

```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.angle(input) ==> [2.0132, 1.056]
```

@compatibility(numpy)
Equivalent to np.angle.
@end_compatibility
  }];

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64]>:$input
  );

  let results = (outs
    TF_F32OrF64Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
}

def TF_AnonymousIteratorOp : TF_Op<"AnonymousIterator", [TF_UniqueResourceAllocation]> {
  let summary = "A container for an iterator resource.";

  let arguments = (ins
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator" or
"IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
resource sharing by name, and does not keep a reference to the resource
container.}], [TF_DatasetIteratorAlloc]>:$handle
  );
}

def TF_AnonymousIteratorV2Op : TF_Op<"AnonymousIteratorV2", [TF_UniqueResourceAllocation]> {
  let summary = "A container for an iterator resource.";

  let arguments = (ins
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator" or
"IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
resource sharing by name, and does not keep a reference to the resource
container.}], [TF_DatasetIteratorAlloc]>:$handle,
    Res<TF_VariantTensor, [{A variant deleter that should be passed into the op that deletes the iterator.}]>:$deleter
  );
}

def TF_AnonymousMemoryCacheOp : TF_Op<"AnonymousMemoryCache", [TF_UniqueResourceAllocation]> {
  let summary = "";

  let arguments = (ins);

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetMemoryCacheAlloc]>:$handle,
    TF_VariantTensor:$deleter
  );
}

def TF_AnonymousMultiDeviceIteratorOp : TF_Op<"AnonymousMultiDeviceIterator", [TF_UniqueResourceAllocation]> {
  let summary = "A container for a multi device iterator resource.";

  let arguments = (ins
    Confined<StrArrayAttr, [ArrayMinCount<1>]>:$devices,
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    Res<TF_ResourceTensor, [{A handle to a multi device iterator that can be passed to a
"MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator,
AnonymousIterator prevents resource sharing by name, and does not keep a
reference to the resource container.}], [TF_DatasetIteratorAlloc]>:$handle,
    Res<TF_VariantTensor, [{A variant deleter that should be passed into the op that deletes the iterator.}]>:$deleter
  );
}

def TF_AnonymousRandomSeedGeneratorOp : TF_Op<"AnonymousRandomSeedGenerator", [TF_UniqueResourceAllocation]> {
  let summary = "";

  let arguments = (ins
    TF_Int64Tensor:$seed,
    TF_Int64Tensor:$seed2
  );

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorAlloc]>:$handle,
    TF_VariantTensor:$deleter
  );
}

def TF_AnonymousSeedGeneratorOp : TF_Op<"AnonymousSeedGenerator", [TF_UniqueResourceAllocation]> {
  let summary = "";

  let arguments = (ins
    TF_Int64Tensor:$seed,
    TF_Int64Tensor:$seed2,
    TF_BoolTensor:$reshuffle
  );

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorAlloc]>:$handle,
    TF_VariantTensor:$deleter
  );
}

def TF_AnyOp : TF_Op<"Any", [NoSideEffect]> {
  let summary = [{
Computes the "logical or" of elements across dimensions of a tensor.
  }];

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
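
For example (a minimal sketch via `tf.reduce_any`, which lowers to this op):

```python
x = tf.constant([[True, False], [False, False]])
tf.reduce_any(x, axis=1)  # [True, False]
tf.reduce_any(x)          # True: reduces over all dimensions when axis is omitted
```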
  }];

  let arguments = (ins
    Arg<TF_BoolTensor, [{The tensor to reduce.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.}]>:$reduction_indices,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    Res<TF_BoolTensor, [{The reduced tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{ return Verify(*this); }];
}

def TF_ApproximateEqualOp : TF_Op<"ApproximateEqual", [Commutative, NoSideEffect]> {
  let summary = "Returns the truth value of abs(x-y) < tolerance element-wise.";

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y,

    DefaultValuedAttr<F32Attr, "1e-05f">:$tolerance
  );

  let results = (outs
    TF_BoolTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ArgMaxOp : TF_Op<"ArgMax", [NoSideEffect]> {
  let summary = [{
Returns the index with the largest value across dimensions of a tensor.
  }];

  let description = [{
Note that in case of ties the identity of the return value is not guaranteed.

Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmax(input = a)
  c = tf.keras.backend.eval(b)
  # c = 4
  # here a[4] = 166.32 which is the largest element of a across axis 0
  ```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
    Arg<TF_I32OrI64Tensor, [{int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors,
use dimension = 0.}]>:$dimension
  );

  let results = (outs
    TF_I32OrI64Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr output_type = TF_DerivedResultTypeAttr<0>;
}

def TF_ArgMinOp : TF_Op<"ArgMin", [NoSideEffect]> {
  let summary = [{
Returns the index with the smallest value across dimensions of a tensor.
  }];

  let description = [{
Note that in case of ties the identity of the return value is not guaranteed.

Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmin(input = a)
  c = tf.keras.backend.eval(b)
  # c = 0
  # here a[0] = 1 which is the smallest element of a across axis 0
  ```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
    Arg<TF_I32OrI64Tensor, [{int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors,
use dimension = 0.}]>:$dimension
  );

  let results = (outs
    TF_I32OrI64Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr output_type = TF_DerivedResultTypeAttr<0>;
}

def TF_AsStringOp : TF_Op<"AsString", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Converts each entry in the given tensor to strings.";

  let description = [{
Supports many numeric types and boolean.

For Unicode, see the
[Working with Unicode text](https://www.tensorflow.org/tutorials/representation/unicode)
tutorial.

Examples:

>>> tf.strings.as_string([3, 2])
<tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)>
>>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy()
array([b'3.14', b'2.72'], dtype=object)
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8, TF_Variant]>:$input,

    DefaultValuedAttr<I64Attr, "-1">:$precision,
    DefaultValuedAttr<BoolAttr, "false">:$scientific,
    DefaultValuedAttr<BoolAttr, "false">:$shortest,
    DefaultValuedAttr<I64Attr, "-1">:$width,
    DefaultValuedAttr<StrAttr, "">:$fill
  );

  let results = (outs
    TF_StrTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AsinOp : TF_Op<"Asin", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the trigonometric inverse sine of x element-wise.";

  let description = [{
The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that
if `y = tf.math.sin(x)`, then `x = tf.math.asin(y)`.

**Note**: The output of `tf.math.asin` will lie within the invertible range
of sine, i.e. [-pi/2, pi/2].

For example:

```python
# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
x = tf.constant([1.047, 0.785])
y = tf.math.sin(x) # [0.8659266, 0.7068252]

tf.math.asin(y) # [1.047, 0.785] = x
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AsinhOp : TF_Op<"Asinh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes inverse hyperbolic sine of x element-wise.";

  let description = [{
Given an input tensor, this function computes inverse hyperbolic sine
  for every element in the tensor. Both input and output have a range of
  `[-inf, inf]`.

  ```python
  x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")])
  tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AssertOp : TF_Op<"Assert", []> {
  let summary = "Asserts that the given condition is true.";

  let description = [{
If `condition` evaluates to false, print the list of tensors in `data`.
`summarize` determines how many entries of the tensors to print.
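
For example (a minimal sketch using the public `tf.debugging.Assert` wrapper):

```python
x = tf.constant(2.0)
tf.debugging.Assert(x > 0, [x])  # passes silently; a false condition raises
                                 # InvalidArgumentError and prints `data`
```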
  }];

  let arguments = (ins
    Arg<TF_BoolTensor, [{The condition to evaluate.}]>:$condition,
    Arg<Variadic<TF_Tensor>, [{The tensors to print out when condition is false.}]>:$data,

    DefaultValuedAttr<I64Attr, "3">:$summarize
  );

  let results = (outs);

  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<1>;

  let hasCanonicalizer = 1;
}

def TF_AssignOp : TF_Op<"Assign", []> {
  let summary = "Update 'ref' by assigning 'value' to it.";

  let description = [{
This operation outputs "ref" after the assignment is done.
This makes it easier to chain operations that need to use the reset value.
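
For example (a TF1-style sketch, since `Assign` operates on reference variables):

```python
v = tf.compat.v1.Variable(1.0)
assign_op = tf.compat.v1.assign(v, 3.0)  # returns the updated value, so further
                                         # ops can be chained on `assign_op`
```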
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{Should be from a `Variable` node. May be uninitialized.}]>:$ref,
    Arg<TF_Tensor, [{The value to be assigned to the variable.}]>:$value,

    DefaultValuedAttr<BoolAttr, "true">:$validate_shape,
    DefaultValuedAttr<BoolAttr, "true">:$use_locking
  );

  let results = (outs
    Res<TF_Tensor, [{Same as "ref".  Returned as a convenience for operations that want
to use the new value after the variable has been reset.}]>:$output_ref
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AssignAddVariableOp : TF_Op<"AssignAddVariableOp", []> {
  let summary = "Adds a value to the current value of a variable.";

  let description = [{
Any ReadVariableOp with a control dependency on this op is guaranteed to
see the incremented value or a subsequent newer one.
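
For example (a sketch; `tf.Variable.assign_add` lowers to this op on resource variables):

```python
v = tf.Variable(10.0)
v.assign_add(2.0)  # v now holds 12.0
```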
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
    Arg<TF_Tensor, [{the value by which the variable will be incremented.}]>:$value
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_AssignSubVariableOp : TF_Op<"AssignSubVariableOp", []> {
  let summary = "Subtracts a value from the current value of a variable.";

  let description = [{
Any ReadVariableOp with a control dependency on this op is guaranteed to
see the decremented value or a subsequent newer one.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
    Arg<TF_Tensor, [{the value by which the variable will be decremented.}]>:$value
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_AssignVariableOp : TF_Op<"AssignVariableOp", []> {
  let summary = "Assigns a new value to a variable.";

  let description = [{
Any ReadVariableOp with a control dependency on this op is guaranteed to return
this value or a subsequent newer value of the variable.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableWrite]>:$resource,
    Arg<TF_Tensor, [{the value to set the new tensor to use.}]>:$value
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_AtanOp : TF_Op<"Atan", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes the trigonometric inverse tangent of x element-wise.";

  let description = [{
The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that
if `y = tf.math.tan(x)`, then `x = tf.math.atan(y)`.

**Note**: The output of `tf.math.atan` will lie within the invertible range
of tan, i.e. (-pi/2, pi/2).

For example:

```python
# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
x = tf.constant([1.047, 0.785])
y = tf.math.tan(x) # [1.731261, 0.99920404]

tf.math.atan(y) # [1.047, 0.785] = x
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_Atan2Op : TF_Op<"Atan2", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                 WithBroadcastableBinOpBuilder {
  let summary = [{
Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
  }];

  let description = [{
This is the angle \\( \theta \in [-\pi, \pi] \\) such that
\\[ x = r \cos(\theta) \\]
and
\\[ y = r \sin(\theta) \\]
where \\(r = \sqrt{x^2 + y^2} \\).

For example:

>>> x = [1., 1.]
>>> y = [1., -1.]
>>> print((tf.math.atan2(y,x) * (180 / np.pi)).numpy())
[ 45. -45.]
  }];

  let arguments = (ins
    TF_FloatTensor:$y,
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AtanhOp : TF_Op<"Atanh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes inverse hyperbolic tangent of x element-wise.";

  let description = [{
Given an input tensor, this function computes inverse hyperbolic tangent
  for every element in the tensor. Input range is `[-1,1]` and output range is
  `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the
  input is `1`, output will be `inf`. Values outside the range will have
  `nan` as output.

  ```python
  x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")])
  tf.math.atanh(x) ==> [nan -inf -0.54930615 inf  0. 0.54930615 nan nan]
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AvgPoolOp : TF_Op<"AvgPool", [NoSideEffect]> {
  let summary = "Performs average pooling on the input.";

  let description = [{
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
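
For example (a worked sketch with non-overlapping 2x2 windows):

```python
x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
tf.nn.avg_pool2d(x, ksize=2, strides=2, padding="VALID")
# shape [1, 2, 2, 1]; window means: [[2.5, 4.5], [10.5, 12.5]]
```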
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{4-D with shape `[batch, height, width, channels]`.}]>:$value,

    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{The average pooled output tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AvgPool3DOp : TF_Op<"AvgPool3D", [NoSideEffect]> {
  let summary = "Performs 3D average pooling on the input.";

  let description = [{
Each entry in `output` is the mean of the corresponding size `ksize` window in
`value`.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{Shape `[batch, depth, rows, cols, channels]` tensor to pool over.}]>:$input,

    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{The average pooled output tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AvgPool3DGradOp : TF_Op<"AvgPool3DGrad", [NoSideEffect]> {
  let summary = "Computes gradients of average pooling function.";

  let arguments = (ins
    Arg<TF_Int32Tensor, [{The original input dimensions.}]>:$orig_input_shape,
    Arg<TF_FloatTensor, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,

    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{The backprop for input.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_AvgPoolGradOp : TF_Op<"AvgPoolGrad", [NoSideEffect]> {
  let summary = "Computes gradients of the average pooling function.";

  let arguments = (ins
    Arg<TF_Int32Tensor, [{1-D.  Shape of the original input to `avg_pool`.}]>:$orig_input_shape,
    Arg<TF_FloatTensor, [{4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
the output of `avg_pool`.}]>:$grad,

    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{4-D.  Gradients w.r.t. the input of `avg_pool`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_BatchDatasetV2Op : TF_Op<"BatchDatasetV2", [NoSideEffect]> {
  let summary = [{
Creates a dataset that batches `batch_size` elements from `input_dataset`.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_dataset,
    Arg<TF_Int64Tensor, [{A scalar representing the number of elements to accumulate in a batch.}]>:$batch_size,
    Arg<TF_BoolTensor, [{A scalar representing whether the last batch should be dropped in case its size
is smaller than desired.}]>:$drop_remainder,

    DefaultValuedAttr<BoolAttr, "false">:$parallel_copy,
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    TF_VariantTensor:$handle
  );
}

def TF_BatchFunctionOp : TF_Op<"BatchFunction", [AttrSizedOperandSegments, NoSideEffect]> {
  let summary = [{
Batches all the input tensors to the computation done by the function.
  }];

  let description = [{
So, for example, in the following code

  ```python

  # This input will be captured.
  y = tf.placeholder_with_default(1.0, shape=[])

  @tf.Defun(tf.float32)
  def computation(a):
    return tf.matmul(a, a) + y

  b = gen_batch_ops.batch_function(
          f=computation,
          in_tensors=[a],
          captured_tensors=computation.captured_inputs,
          Tout=[o.type for o in computation.definition.signature.output_arg],
          num_batch_threads=1,
          max_batch_size=10,
          batch_timeout_micros=100000,  # 100ms
          allowed_batch_sizes=[3, 10],
          batching_queue="")
  ```

If more than one session.run call is simultaneously trying to compute `b`,
the values of `a` will be gathered, non-deterministically concatenated
along the first axis, and only one thread will run the computation.

Assumes that all arguments of the function are Tensors which will be batched
along their first dimension.

Arguments that are captured are not batched. The session.run call which does
the concatenation will use the values of the captured tensors available to it.
Therefore, typical uses of captured tensors should involve values which remain
unchanged across session.run calls. Inference is a good example of this.

SparseTensor is not supported. The return value of the decorated function
must be a Tensor or a list/tuple of Tensors.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{The tensors to be batched.}]>:$in_tensors,
    Arg<Variadic<TF_Tensor>, [{The tensors which are captured in the function, and don't need
to be batched.}]>:$captured_tensors,

    SymbolRefAttr:$f,
    I64Attr:$num_batch_threads,
    I64Attr:$max_batch_size,
    I64Attr:$batch_timeout_micros,
    DefaultValuedAttr<I64Attr, "10">:$max_enqueued_batches,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$allowed_batch_sizes,
    DefaultValuedAttr<StrAttr, "">:$container,
    DefaultValuedAttr<StrAttr, "">:$shared_name,
    DefaultValuedAttr<StrAttr, "">:$batching_queue,
    DefaultValuedAttr<BoolAttr, "false">:$enable_large_batch_splitting
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{The output tensors.}]>:$out_tensors
  );

  TF_DerivedOperandTypeListAttr Tcaptured = TF_DerivedOperandTypeListAttr<1>;
  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
}

def TF_BatchMatMulOp : TF_Op<"BatchMatMul", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Multiplies slices of two tensors in batches.";

  let description = [{
Multiplies all slices of `Tensor` `x` and `y` (each slice can be
viewed as an element of a batch), and arranges the individual results
in a single output tensor of the same batch size. Each of the
individual slices can optionally be adjointed (to adjoint a matrix
means to transpose and conjugate it) before multiplication by setting
the `adj_x` or `adj_y` flag to `True`; these flags default to `False`.

The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
and `[..., r_y, c_y]`.

The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:

    r_o = c_x if adj_x else r_x
    c_o = r_y if adj_y else c_y

It is computed as:

    output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
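
For example (a shape-level sketch using `tf.linalg.matmul`, which emits a BatchMatMul variant):

```python
x = tf.ones([2, 2, 3])  # a batch of two 2x3 matrices
y = tf.ones([2, 3, 4])  # a batch of two 3x4 matrices
tf.linalg.matmul(x, y).shape  # TensorShape([2, 2, 4])
```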
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_x, c_x]`.}]>:$x,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_y, c_y]`.}]>:$y,

    DefaultValuedAttr<BoolAttr, "false">:$adj_x,
    DefaultValuedAttr<BoolAttr, "false">:$adj_y
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_o, c_o]`}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BatchMatMulV2Op : TF_Op<"BatchMatMulV2", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Multiplies slices of two tensors in batches.";

  let description = [{
Multiplies all slices of `Tensor` `x` and `y` (each slice can be
viewed as an element of a batch), and arranges the individual results
in a single output tensor of the same batch size. Each of the
individual slices can optionally be adjointed (to adjoint a matrix
means to transpose and conjugate it) before multiplication by setting
the `adj_x` or `adj_y` flag to `True`; these flags default to `False`.

The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
and `[..., r_y, c_y]`.

The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:

    r_o = c_x if adj_x else r_x
    c_o = r_y if adj_y else c_y

It is computed as:

    output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])

*NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More
about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
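
For example (a shape-level sketch of batch-dimension broadcasting):

```python
x = tf.ones([5, 2, 3])
y = tf.ones([3, 4])           # broadcast against the batch dimension of x
tf.linalg.matmul(x, y).shape  # TensorShape([5, 2, 4])
```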
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_x, c_x]`.}]>:$x,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_y, c_y]`.}]>:$y,

    DefaultValuedAttr<BoolAttr, "false">:$adj_x,
    DefaultValuedAttr<BoolAttr, "false">:$adj_y
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_o, c_o]`}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasCanonicalizer = 1;
}

def TF_BatchMatMulV3Op : TF_Op<"BatchMatMulV3", [NoSideEffect]> {
  let summary = "Multiplies slices of two tensors in batches.";

  let description = [{
Multiplies all slices of `Tensor` `x` and `y` (each slice can be
viewed as an element of a batch), and arranges the individual results
in a single output tensor of the same batch size. Each of the
individual slices can optionally be adjointed (to adjoint a matrix
means to transpose and conjugate it) before multiplication by setting
the `adj_x` or `adj_y` flag to `True`; these flags default to `False`.

The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
and `[..., r_y, c_y]`.

The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:

    r_o = c_x if adj_x else r_x
    c_o = r_y if adj_y else c_y

It is computed as:

    output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])

*NOTE*: `BatchMatMulV3` supports broadcasting in the batch dimensions. More
about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint8]>, [{2-D or higher with shape `[..., r_x, c_x]`.}]>:$x,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint8]>, [{2-D or higher with shape `[..., r_y, c_y]`.}]>:$y,

    DefaultValuedAttr<BoolAttr, "false">:$adj_x,
    DefaultValuedAttr<BoolAttr, "false">:$adj_y
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_o, c_o]`}]>:$output
  );

  TF_DerivedOperandTypeAttr Ta = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tb = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
}

def TF_BatchNormWithGlobalNormalizationOp : TF_Op<"BatchNormWithGlobalNormalization", [NoSideEffect]> {
  let summary = "Batch normalization.";

  let description = [{
This op is deprecated. Prefer `tf.nn.batch_normalization`.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 4D input Tensor.}]>:$t,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D mean Tensor with size matching the last dimension of t.
This is the first output from tf.nn.moments,
or a saved moving average thereof.}]>:$m,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D variance Tensor with size matching the last dimension of t.
This is the second output from tf.nn.moments,
or a saved moving average thereof.}]>:$v,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D beta Tensor with size matching the last dimension of t.
An offset to be added to the normalized tensor.}]>:$beta,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D gamma Tensor with size matching the last dimension of t.
If "scale_after_normalization" is true, this tensor will be multiplied
with the normalized tensor.}]>:$gamma,

    F32Attr:$variance_epsilon,
    BoolAttr:$scale_after_normalization
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$result
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BatchToSpaceOp : TF_Op<"BatchToSpace", [NoSideEffect]> {
  let summary = "BatchToSpace for 4-D tensors of type T.";

  let description = [{
This is a legacy version of the more general BatchToSpaceND.

Rearranges (permutes) data from batch into blocks of spatial data, followed by
cropping. This is the reverse transformation of SpaceToBatch. More specifically,
this op outputs a copy of the input tensor where values from the `batch`
dimension are moved in spatial blocks to the `height` and `width` dimensions,
followed by cropping along the `height` and `width` dimensions.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{4-D tensor with shape
`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
  depth]`. Note that the batch size of the input tensor must be divisible by
`block_size * block_size`.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
how many elements to crop from the intermediate result across the spatial
dimensions as follows:

    crops = [[crop_top, crop_bottom], [crop_left, crop_right]]}]>:$crops,

    Confined<I64Attr, [IntMinValue<2>]>:$block_size
  );

  let results = (outs
    Res<TF_Tensor, [{4-D with shape `[batch, height, width, depth]`, where:

      height = height_pad - crop_top - crop_bottom
      width = width_pad - crop_left - crop_right

The attr `block_size` must be greater than one. It indicates the block size.

Some examples:

(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:

```
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

The output tensor has shape `[1, 2, 2, 1]` and value:

```
x = [[[[1], [2]], [[3], [4]]]]
```

(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:

```
[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
```

The output tensor has shape `[1, 2, 2, 3]` and value:

```
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:

```
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

The output tensor has shape `[1, 4, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]],
      [[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```

(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:

```
x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
```

The output tensor has shape `[2, 2, 4, 1]` and value:

```
x = [[[[1],  [2],  [3],  [4]],
      [[5],  [6],  [7],  [8]]],
     [[[9],  [10], [11], [12]],
      [[13], [14], [15], [16]]]]
```}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasCanonicalizer = 1;
}

def TF_BatchToSpaceNDOp : TF_Op<"BatchToSpaceND", [NoSideEffect]> {
  let summary = "BatchToSpace for N-D tensors of type T.";

  let description = [{
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
`block_shape + [batch]`, interleaves these blocks back into the grid defined by
the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
the input.  The spatial dimensions of this intermediate result are then
optionally cropped according to `crops` to produce the output.  This is the
reverse of SpaceToBatch.  See below for a precise description.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
where spatial_shape has M dimensions.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{1-D with shape `[M]`, all values must be >= 1.}]>:$block_shape,
    Arg<TF_I32OrI64Tensor, [{2-D with shape `[M, 2]`, all values must be >= 0.
  `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
  dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
  required that
  `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.

This operation is equivalent to the following steps:

1. Reshape `input` to `reshaped` of shape:
     [block_shape[0], ..., block_shape[M-1],
      batch / prod(block_shape),
      input_shape[1], ..., input_shape[N-1]]

2. Permute dimensions of `reshaped` to produce `permuted` of shape
     [batch / prod(block_shape),

      input_shape[1], block_shape[0],
      ...,
      input_shape[M], block_shape[M-1],

      input_shape[M+1], ..., input_shape[N-1]]

3. Reshape `permuted` to produce `reshaped_permuted` of shape
     [batch / prod(block_shape),

      input_shape[1] * block_shape[0],
      ...,
      input_shape[M] * block_shape[M-1],

      input_shape[M+1],
      ...,
      input_shape[N-1]]

4. Crop the start and end of dimensions `[1, ..., M]` of
   `reshaped_permuted` according to `crops` to produce the output of shape:
     [batch / prod(block_shape),

      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
      ...,
      input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],

      input_shape[M+1], ..., input_shape[N-1]]

Some examples:

(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [0, 0]]`:

```
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

The output tensor has shape `[1, 2, 2, 1]` and value:

```
x = [[[[1], [2]], [[3], [4]]]]
```

(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [0, 0]]`:

```
[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
```

The output tensor has shape `[1, 2, 2, 3]` and value:

```
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [0, 0]]`:

```
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

The output tensor has shape `[1, 4, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]],
      [[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```

(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [2, 0]]`:

```
x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
     [[[0], [2], [4]]], [[[0], [10], [12]]],
     [[[0], [5], [7]]], [[[0], [13], [15]]],
     [[[0], [6], [8]]], [[[0], [14], [16]]]]
```

The output tensor has shape `[2, 2, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]]],
     [[[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```}]>:$crops
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tblock_shape = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tcrops = TF_DerivedOperandTypeAttr<2>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BetaincOp : TF_Op<"Betainc", [NoSideEffect]> {
  let summary = [{
Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
  }];

  let description = [{
The regularized incomplete beta integral is defined as:

\\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)

where

\\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)

is the incomplete beta function and \\(B(a, b)\\) is the *complete*
beta function.
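
For example (a worked sketch: for a = 2, b = 3, B(a, b) = 1/12 and
B(0.5; a, b) = 11/192, so the ratio is 0.6875):

```python
tf.math.betainc(2.0, 3.0, 0.5)  # 0.6875
```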
  }];

  let arguments = (ins
    TF_F32OrF64Tensor:$a,
    TF_F32OrF64Tensor:$b,
    TF_F32OrF64Tensor:$x
  );

  let results = (outs
    TF_F32OrF64Tensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BiasAddOp : TF_Op<"BiasAdd", [NoSideEffect, TF_LayoutSensitiveInterface]> {
  let summary = "Adds `bias` to `value`.";

  let description = [{
This is a special case of `tf.add` where `bias` is restricted to be 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
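
For example (a minimal sketch via `tf.nn.bias_add`):

```python
value = tf.zeros([2, 3])
bias = tf.constant([1.0, 2.0, 3.0])
tf.nn.bias_add(value, bias)  # [[1., 2., 3.], [1., 2., 3.]]
```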
1490  }];
1491
1492  let arguments = (ins
1493    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$value,
1494    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the last dimension of `value`.}]>:$bias,
1495
1496    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
1497  );
1498
1499  let results = (outs
1500    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Broadcasted sum of `value` and `bias`.}]>:$output
1501  );
1502
1503  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1504
1505  let extraClassDeclaration = [{
1506    // TF_LayoutSensitiveInterface:
1507    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
1508    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
1509    StringRef GetOptimalLayout(const RuntimeDevices& devices);
1510    LogicalResult UpdateDataFormat(StringRef data_format);
1511  }];
1512
1513  let verifier = [{
1514    return Verify(*this);
1515  }];
1516}
1517
1518def TF_BiasAddGradOp : TF_Op<"BiasAddGrad", [NoSideEffect]> {
1519  let summary = [{
1520The backward operation for "BiasAdd" on the "bias" tensor.
1521  }];
1522
1523  let description = [{
1524It accumulates all the values from out_backprop into the feature dimension.
1525For NHWC data format, the feature dimension is the last. For NCHW data format,
1526the feature dimension is the third-to-last.
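
For intuition, the result matches a plain sum over the non-feature axes; a
minimal sketch via `tf.raw_ops.BiasAddGrad` (assuming eager execution):

```python
import tensorflow as tf

out_backprop = tf.ones([2, 4, 4, 3])  # NHWC, so the feature dimension is 3
grad = tf.raw_ops.BiasAddGrad(out_backprop=out_backprop, data_format="NHWC")
ref = tf.reduce_sum(out_backprop, axis=[0, 1, 2])  # equivalent reduction
print(grad.numpy(), ref.numpy())  # both [32. 32. 32.]
```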
1527  }];
1528
1529  let arguments = (ins
1530    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$out_backprop,
1531
1532    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
1533  );
1534
1535  let results = (outs
1536    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the feature dimension of `out_backprop`.}]>:$output
1537  );
1538
1539  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1540
1541  let verifier = [{
1542    return Verify(*this);
1543  }];
1544}
1545
1546def TF_BiasAddV1Op : TF_Op<"BiasAddV1", [NoSideEffect]> {
1547  let summary = "Adds `bias` to `value`.";
1548
1549  let description = [{
1550This is a deprecated version of BiasAdd and will be soon removed.
1551
1552This is a special case of `tf.add` where `bias` is restricted to be 1-D.
1553Broadcasting is supported, so `value` may have any number of dimensions.
1554  }];
1555
1556  let arguments = (ins
1557    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$value,
1558    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the last dimension of `value`.}]>:$bias
1559  );
1560
1561  let results = (outs
1562    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Broadcasted sum of `value` and `bias`.}]>:$output
1563  );
1564
1565  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1566
1567  let hasCanonicalizer = 1;
1568}
1569
1570def TF_BincountOp : TF_Op<"Bincount", [NoSideEffect]> {
1571  let summary = [{
1572Counts the number of occurrences of each value in an integer array.
1573  }];
1574
1575  let description = [{
1576Outputs a vector with length `size` and the same dtype as `weights`. If
1577`weights` are empty, then index `i` stores the number of times the value `i` is
1578counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
1579the value in `weights` at each index where the corresponding value in `arr` is
1580`i`.
1581
1582Values in `arr` outside of the range [0, size) are ignored.
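
A minimal sketch via `tf.raw_ops.Bincount` (empty `weights`, so the op returns
plain counts; the output dtype follows `weights`):

```python
import tensorflow as tf

arr = tf.constant([1, 1, 2, 3, 7], dtype=tf.int32)  # 7 >= size, so ignored
size = tf.constant(4, dtype=tf.int32)
weights = tf.constant([], dtype=tf.float32)
print(tf.raw_ops.Bincount(arr=arr, size=size, weights=weights).numpy())
# [0. 2. 1. 1.]
```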
1583  }];
1584
1585  let arguments = (ins
1586    Arg<TF_Int32Tensor, [{int32 `Tensor`.}]>:$arr,
1587    Arg<TF_Int32Tensor, [{non-negative int32 scalar `Tensor`.}]>:$size,
1588    Arg<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{is an int32, int64, float32, or float64 `Tensor` with the same
1589shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
1590equal to 1.}]>:$weights
1591  );
1592
1593  let results = (outs
1594    Res<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{1D `Tensor` with length equal to `size`. The counts or summed weights for
1595each value in the range [0, size).}]>:$bins
1596  );
1597
1598  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
1599}
1600
1601def TF_BitcastOp : TF_Op<"Bitcast", [NoSideEffect]> {
1602  let summary = [{
1603Bitcasts a tensor from one type to another without copying data.
1604  }];
1605
1606  let description = [{
1607Given a tensor `input`, this operation returns a tensor that has the same buffer
1608data as `input` with datatype `type`.
1609
1610If the input datatype `T` is larger than the output datatype `type` then the
1611shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].
1612
1613If `T` is smaller than `type`, the operator requires that the rightmost
1614dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
1615[..., sizeof(`type`)/sizeof(`T`)] to [...].
1616
tf.bitcast() and tf.cast() work differently when a real dtype is cast to a
complex dtype (e.g. tf.complex64 or tf.complex128): tf.cast() sets the
imaginary part to 0, while tf.bitcast() raises an error, as in the examples
below.
1621
1622Example 1:
1623
1624>>> a = [1., 2., 3.]
1625>>> equality_bitcast = tf.bitcast(a, tf.complex128)
1626Traceback (most recent call last):
1627...
1628InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
1629>>> equality_cast = tf.cast(a, tf.complex128)
1630>>> print(equality_cast)
1631tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)
1632
1633Example 2:
1634
1635>>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
1636<tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>
1637
1638Example 3:
1639
1640>>> x = [1., 2., 3.]
1641>>> y = [0., 2., 3.]
>>> equality = tf.equal(x, y)
>>> equality_cast = tf.cast(equality, tf.float32)
>>> equality_bitcast = tf.bitcast(equality_cast, tf.uint8)
1645>>> print(equality)
1646tf.Tensor([False True True], shape=(3,), dtype=bool)
1647>>> print(equality_cast)
1648tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
1649>>> print(equality_bitcast)
1650tf.Tensor(
1651    [[  0   0   0   0]
1652     [  0   0 128  63]
1653     [  0   0 128  63]], shape=(3, 4), dtype=uint8)
1654
1655*NOTE*: Bitcast is implemented as a low-level cast, so machines with different
1656endian orderings will give different results.
1657  }];
1658
1659  let arguments = (ins
1660    TF_NumberTensor:$input
1661  );
1662
1663  let results = (outs
1664    TF_NumberTensor:$output
1665  );
1666
1667  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1668  TF_DerivedResultTypeAttr type = TF_DerivedResultTypeAttr<0>;
1669
1670  let hasCanonicalizer = 1;
1671}
1672
1673def TF_BitwiseAndOp : TF_Op<"BitwiseAnd", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
1674                      WithBroadcastableBinOpBuilder {
1675  let summary = "Elementwise computes the bitwise AND of `x` and `y`.";
1676
1677  let description = [{
1678The result will have those bits set, that are set in both `x` and `y`. The
1679computation is performed on the underlying representations of `x` and `y`.
1680
1681For example:
1682
1683```python
1684import tensorflow as tf
1685from tensorflow.python.ops import bitwise_ops
1686dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
1687              tf.uint8, tf.uint16, tf.uint32, tf.uint64]
1688
1689for dtype in dtype_list:
1690  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
1691  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
1692  exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)
1693
1694  res = bitwise_ops.bitwise_and(lhs, rhs)
1695  tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
1696```
1697  }];
1698
1699  let arguments = (ins
1700    TF_IntTensor:$x,
1701    TF_IntTensor:$y
1702  );
1703
1704  let results = (outs
1705    TF_IntTensor:$z
1706  );
1707
1708  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1709}
1710
1711def TF_BitwiseOrOp : TF_Op<"BitwiseOr", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
1712                     WithBroadcastableBinOpBuilder {
1713  let summary = "Elementwise computes the bitwise OR of `x` and `y`.";
1714
1715  let description = [{
1716The result will have those bits set, that are set in `x`, `y` or both. The
1717computation is performed on the underlying representations of `x` and `y`.
1718
1719For example:
1720
1721```python
1722import tensorflow as tf
1723from tensorflow.python.ops import bitwise_ops
1724dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
1725              tf.uint8, tf.uint16, tf.uint32, tf.uint64]
1726
1727for dtype in dtype_list:
1728  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
1729  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
1730  exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)
1731
1732  res = bitwise_ops.bitwise_or(lhs, rhs)
1733  tf.assert_equal(tf.cast(res,  tf.float32), exp)  # TRUE
1734```
1735  }];
1736
1737  let arguments = (ins
1738    TF_IntTensor:$x,
1739    TF_IntTensor:$y
1740  );
1741
1742  let results = (outs
1743    TF_IntTensor:$z
1744  );
1745
1746  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1747}
1748
1749def TF_BitwiseXorOp : TF_Op<"BitwiseXor", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
1750                      WithBroadcastableBinOpBuilder {
1751  let summary = "Elementwise computes the bitwise XOR of `x` and `y`.";
1752
1753  let description = [{
1754The result will have those bits set, that are different in `x` and `y`. The
1755computation is performed on the underlying representations of `x` and `y`.
1756
1757For example:
1758
1759```python
1760import tensorflow as tf
1761from tensorflow.python.ops import bitwise_ops
1762dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
1763              tf.uint8, tf.uint16, tf.uint32, tf.uint64]
1764
1765for dtype in dtype_list:
1766  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
1767  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
1768  exp = tf.constant([5, 5, 4, 5],  dtype=tf.float32)
1769
1770  res = bitwise_ops.bitwise_xor(lhs, rhs)
1771  tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
1772```
1773  }];
1774
1775  let arguments = (ins
1776    TF_IntTensor:$x,
1777    TF_IntTensor:$y
1778  );
1779
1780  let results = (outs
1781    TF_IntTensor:$z
1782  );
1783
1784  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1785}
1786
1787def TF_BoostedTreesBucketizeOp : TF_Op<"BoostedTreesBucketize", [NoSideEffect, SameVariadicOperandSize]> {
1788  let summary = "Bucketize each feature based on bucket boundaries.";
1789
1790  let description = [{
1791An op that returns a list of float tensors, where each tensor represents the
1792bucketized values for a single feature.
1793  }];
1794
1795  let arguments = (ins
    Arg<Variadic<TF_Float32Tensor>, [{float; List of Rank 1 Tensors each containing float values for a single feature.}]>:$float_values,
1797    Arg<Variadic<TF_Float32Tensor>, [{float; List of Rank 1 Tensors each containing the bucket boundaries for a single
1798feature.}]>:$bucket_boundaries
1799  );
1800
1801  let results = (outs
1802    Res<Variadic<TF_Int32Tensor>, [{int; List of Rank 1 Tensors each containing the bucketized values for a single feature.}]>:$buckets
1803  );
1804
1805  TF_DerivedOperandSizeAttr num_features = TF_DerivedOperandSizeAttr<0>;
1806}
1807
1808def TF_BroadcastArgsOp : TF_Op<"BroadcastArgs", [NoSideEffect]> {
1809  let summary = "Return the shape of s0 op s1 with broadcast.";
1810
1811  let description = [{
1812Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
1813broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
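
A minimal sketch via `tf.raw_ops.BroadcastArgs`:

```python
import tensorflow as tf

s0 = tf.constant([1, 4], dtype=tf.int32)
s1 = tf.constant([3, 1], dtype=tf.int32)
# Broadcasting a [1, 4] shape with a [3, 1] shape yields [3, 4].
print(tf.raw_ops.BroadcastArgs(s0=s0, s1=s1).numpy())  # [3 4]
```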
1814  }];
1815
1816  let arguments = (ins
1817    TF_I32OrI64Tensor:$s0,
1818    TF_I32OrI64Tensor:$s1
1819  );
1820
1821  let results = (outs
1822    TF_I32OrI64Tensor:$r0
1823  );
1824
1825  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1826}
1827
1828def TF_BroadcastGradientArgsOp : TF_Op<"BroadcastGradientArgs", [NoSideEffect, SameOperandsAndResultElementType, TF_OperandHasRank<0, 1>, TF_OperandHasRank<1, 1>, TF_ResultHasRank<0, 1>, TF_ResultHasRank<1, 1>]> {
1829  let summary = [{
1830Return the reduction indices for computing gradients of s0 op s1 with broadcast.
1831  }];
1832
1833  let description = [{
1834This is typically used by gradient computations for a broadcasting operation.
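
A minimal sketch via `tf.raw_ops.BroadcastGradientArgs`; `r0` and `r1` are the
axes to sum over to reduce a broadcast result back to `s0` and `s1`:

```python
import tensorflow as tf

s0 = tf.constant([2, 3, 5], dtype=tf.int32)
s1 = tf.constant([5], dtype=tf.int32)
r0, r1 = tf.raw_ops.BroadcastGradientArgs(s0=s0, s1=s1)
# `s0` already matches the broadcast shape [2, 3, 5], so r0 is empty;
# `s1` was expanded along axes 0 and 1, so r1 is [0, 1].
print(r0.numpy(), r1.numpy())  # [] [0 1]
```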
1835  }];
1836
1837  let arguments = (ins
1838    TF_I32OrI64Tensor:$s0,
1839    TF_I32OrI64Tensor:$s1
1840  );
1841
1842  let results = (outs
1843    TF_I32OrI64Tensor:$r0,
1844    TF_I32OrI64Tensor:$r1
1845  );
1846
1847  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1848
1849  let verifier = [{
1850    return Verify(*this);
1851  }];
1852
1853  let hasFolder = 1;
1854}
1855
1856def TF_BroadcastToOp : TF_Op<"BroadcastTo", [NoSideEffect]> {
1857  let summary = "Broadcast an array for a compatible shape.";
1858
1859  let description = [{
Broadcasting is the process of making arrays have compatible shapes for
arithmetic operations. Two shapes are compatible if, for each dimension pair,
they are either equal or one of them is one. When broadcasting a Tensor to a
shape, the comparison starts with the trailing dimensions and works its way
forward.
1865
1866For example,
1867
1868>>> x = tf.constant([1, 2, 3])
1869>>> y = tf.broadcast_to(x, [3, 3])
1870>>> print(y)
1871tf.Tensor(
1872    [[1 2 3]
1873     [1 2 3]
1874     [1 2 3]], shape=(3, 3), dtype=int32)
1875
In the above example, the input Tensor with shape `[3]` is broadcast to the
output Tensor with shape `[3, 3]`.
1878
1879When doing broadcasted operations such as multiplying a tensor
1880by a scalar, broadcasting (usually) confers some time or space
1881benefit, as the broadcasted tensor is never materialized.
1882
1883However, `broadcast_to` does not carry with it any such benefits.
1884The newly-created tensor takes the full memory of the broadcasted
shape. (In a graph context, `broadcast_to` might be fused into a subsequent
operation and then be optimized away, however.)
1887  }];
1888
1889  let arguments = (ins
1890    Arg<TF_Tensor, [{A Tensor to broadcast.}]>:$input,
1891    Arg<TF_I32OrI64Tensor, [{An 1-D `int` Tensor. The shape of the desired output.}]>:$shape
1892  );
1893
1894  let results = (outs
1895    Res<TF_Tensor, [{A Tensor.}]>:$output
1896  );
1897
1898  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1899  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
1900
1901  let verifier = [{
1902    return Verify(*this);
1903  }];
1904  let hasFolder = 1;
1905}
1906
1907def TF_BucketizeOp : TF_Op<"Bucketize", [NoSideEffect, SameOperandsAndResultShape]> {
1908  let summary = "Bucketizes 'input' based on 'boundaries'.";
1909
1910  let description = [{
1911For example, if the inputs are
1912    boundaries = [0, 10, 100]
1913    input = [[-5, 10000]
1914             [150,   10]
1915             [5,    100]]
1916
1917then the output will be
1918    output = [[0, 3]
1919              [3, 2]
1920              [1, 3]]
1921  }];
1922
1923  let arguments = (ins
    Arg<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Any shape of Tensor with int or float type.}]>:$input,
1925
1926    F32ArrayAttr:$boundaries
1927  );
1928
1929  let results = (outs
    Res<TF_Int32Tensor, [{Same shape as 'input', with each value of input replaced by its bucket index.
1931
1932@compatibility(numpy)
1933Equivalent to np.digitize.
1934@end_compatibility}]>:$output
1935  );
1936
1937  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1938}
1939
1940def TF_CacheDatasetV2Op : TF_Op<"CacheDatasetV2", []> {
1941  let summary = "";
1942
1943  let arguments = (ins
1944    TF_VariantTensor:$input_dataset,
1945    TF_StrTensor:$filename,
1946    Arg<TF_ResourceTensor, "", [TF_DatasetMemoryCacheRead, TF_DatasetMemoryCacheWrite]>:$cache,
1947
1948    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
1949    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
1950  );
1951
1952  let results = (outs
1953    TF_VariantTensor:$handle
1954  );
1955}
1956
1957def TF_CastOp : TF_Op<"Cast", [NoSideEffect, SameOperandsAndResultShape]> {
1958  let summary = "Cast x of type SrcT to y of DstT.";
1959
1960  let arguments = (ins
1961    TF_Tensor:$x,
1962
1963    DefaultValuedAttr<BoolAttr, "false">:$Truncate
1964  );
1965
1966  let results = (outs
1967    TF_Tensor:$y
1968  );
1969
1970  TF_DerivedOperandTypeAttr SrcT = TF_DerivedOperandTypeAttr<0>;
1971  TF_DerivedResultTypeAttr DstT = TF_DerivedResultTypeAttr<0>;
1972
1973  let hasFolder = 1;
1974}
1975
1976def TF_CeilOp : TF_Op<"Ceil", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
1977  let summary = "Returns element-wise smallest integer not less than x.";
1978
1979  let arguments = (ins
1980    TF_FloatTensor:$x
1981  );
1982
1983  let results = (outs
1984    TF_FloatTensor:$y
1985  );
1986
1987  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
1988}
1989
1990def TF_CheckNumericsOp : TF_Op<"CheckNumerics", [TF_SameOperandsAndResultTypeResolveRef]> {
1991  let summary = "Checks a tensor for NaN and Inf values.";
1992
1993  let description = [{
1994When run, reports an `InvalidArgument` error if `tensor` has any values
1995that are not a number (NaN) or infinity (Inf). Otherwise, returns the input
1996tensor.
1997
1998Example usage:
1999
2000``` python
2001a = tf.Variable(1.0)
2002tf.debugging.check_numerics(a, message='')
2003
2004b = tf.Variable(np.nan)
2005try:
2006  tf.debugging.check_numerics(b, message='Checking b')
2007except Exception as e:
2008  assert "Checking b : Tensor had NaN values" in e.message
2009
2010c = tf.Variable(np.inf)
2011try:
2012  tf.debugging.check_numerics(c, message='Checking c')
2013except Exception as e:
2014  assert "Checking c : Tensor had Inf values" in e.message
2015```
2016  }];
2017
2018  let arguments = (ins
2019    TF_FloatTensor:$tensor,
2020
2021    StrAttr:$message
2022  );
2023
2024  let results = (outs
2025    TF_FloatTensor:$output
2026  );
2027
2028  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2029}
2030
2031def TF_CholeskyOp : TF_Op<"Cholesky", [NoSideEffect]> {
2032  let summary = [{
2033Computes the Cholesky decomposition of one or more square matrices.
2034  }];
2035
2036  let description = [{
2037The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
2038form square matrices.
2039
2040The input has to be symmetric and positive definite. Only the lower-triangular
2041part of the input will be used for this operation. The upper-triangular part
2042will not be read.
2043
2044The output is a tensor of the same shape as the input
2045containing the Cholesky decompositions for all input submatrices `[..., :, :]`.
2046
2047**Note**: The gradient computation on GPU is faster for large matrices but
2048not for large batch dimensions when the submatrices are small. In this
2049case it might be faster to use the CPU.
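
A minimal numeric sketch via the public `tf.linalg.cholesky` wrapper (assumed
here to map to this op):

```python
import tensorflow as tf

a = tf.constant([[4., 2.],
                 [2., 5.]])  # symmetric positive definite
l = tf.linalg.cholesky(a)    # lower-triangular factor
print(l.numpy())             # [[2. 0.], [1. 2.]]
print(tf.matmul(l, l, transpose_b=True).numpy())  # reconstructs `a`
```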
2050  }];
2051
2052  let arguments = (ins
2053    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$input
2054  );
2055
2056  let results = (outs
2057    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$output
2058  );
2059
2060  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2061}
2062
2063def TF_ClipByValueOp : TF_Op<"ClipByValue", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
2064  let summary = "Clips tensor values to a specified min and max.";
2065
2066  let description = [{
2067Given a tensor `t`, this operation returns a tensor of the same type and
2068shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
2069Any values less than `clip_value_min` are set to `clip_value_min`. Any values
2070greater than `clip_value_max` are set to `clip_value_max`.
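
A minimal sketch via the public `tf.clip_by_value` wrapper (assumed here to
map to this op):

```python
import tensorflow as tf

t = tf.constant([-1., 0., 3., 7.])
print(tf.clip_by_value(t, clip_value_min=0., clip_value_max=5.).numpy())
# [0. 0. 3. 5.]
```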
2071  }];
2072
2073  let arguments = (ins
2074    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`.}]>:$t,
2075    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
2076as `t`. The minimum value to clip by.}]>:$clip_value_min,
2077    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
2078as `t`. The maximum value to clip by.}]>:$clip_value_max
2079  );
2080
2081  let results = (outs
2082    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A clipped `Tensor` with the same shape as input 't'.}]>:$output
2083  );
2084
2085  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2086}
2087
2088def TF_CollectiveBcastRecvOp : TF_Op<"CollectiveBcastRecv", []> {
2089  let summary = "Receives a tensor value broadcast from another device.";
2090
2091  let arguments = (ins
2092    I64Attr:$group_size,
2093    I64Attr:$group_key,
2094    I64Attr:$instance_key,
2095    TF_ShapeAttr:$shape,
2096    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
2097    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
2098  );
2099
2100  let results = (outs
2101    TensorOf<[TF_Bool, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
2102  );
2103
2104  TF_DerivedResultTypeAttr T = TF_DerivedResultTypeAttr<0>;
2105}
2106
2107def TF_CollectiveBcastSendOp : TF_Op<"CollectiveBcastSend", []> {
2108  let summary = "Broadcasts a tensor value to one or more other devices.";
2109
2110  let arguments = (ins
2111    TensorOf<[TF_Bool, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,
2112
2113    I64Attr:$group_size,
2114    I64Attr:$group_key,
2115    I64Attr:$instance_key,
2116    TF_ShapeAttr:$shape,
2117    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
2118    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
2119  );
2120
2121  let results = (outs
2122    TensorOf<[TF_Bool, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
2123  );
2124
2125  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2126}
2127
2128def TF_CollectiveGatherOp : TF_Op<"CollectiveGather", []> {
2129  let summary = [{
2130Mutually accumulates multiple tensors of identical type and shape.
2131  }];
2132
2133  let arguments = (ins
2134    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,
2135
2136    I64Attr:$group_size,
2137    I64Attr:$group_key,
2138    I64Attr:$instance_key,
2139    TF_ShapeAttr:$shape,
2140    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
2141    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
2142  );
2143
2144  let results = (outs
2145    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
2146  );
2147
2148  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2149}
2150
2151def TF_CollectivePermuteOp : TF_Op<"CollectivePermute", []> {
2152  let summary = "An Op to permute tensors across replicated TPU instances.";
2153
2154  let description = [{
2155Each instance supplies its own input.
2156
2157For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
2158source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs:
2159`[D, A, B, C]`.
2160  }];
2161
2162  let arguments = (ins
2163    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The local input to be permuted. Currently only supports float and
2164bfloat16.}]>:$input,
2165    Arg<TF_Int32Tensor, [{A tensor with shape [num_pairs, 2].}]>:$source_target_pairs
2166  );
2167
2168  let results = (outs
2169    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The permuted input.}]>:$output
2170  );
2171
2172  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2173}
2174
2175def TF_CollectiveReduceOp : TF_Op<"CollectiveReduce", [TF_SameOperandsAndResultTypeResolveRef]> {
2176  let summary = [{
2177Mutually reduces multiple tensors of identical type and shape.
2178  }];
2179
2180  let arguments = (ins
2181    TF_FpOrI32OrI64Tensor:$input,
2182
2183    I64Attr:$group_size,
2184    I64Attr:$group_key,
2185    I64Attr:$instance_key,
2186    TF_AnyStrAttrOf<["Min", "Max", "Mul", "Add"]>:$merge_op,
2187    TF_AnyStrAttrOf<["Id", "Div"]>:$final_op,
2188    I64ArrayAttr:$subdiv_offsets,
2189    DefaultValuedAttr<I64ArrayAttr, "{}">:$wait_for,
2190    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
2191    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
2192  );
2193
2194  let results = (outs
2195    TF_FpOrI32OrI64Tensor:$data
2196  );
2197
2198  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2199}
2200
2201def TF_CollectiveReduceV2Op : TF_Op<"CollectiveReduceV2", []> {
2202  let summary = [{
2203Mutually reduces multiple tensors of identical type and shape.
2204  }];
2205
2206  let arguments = (ins
2207    TF_FpOrI32OrI64Tensor:$input,
2208    TF_Int32Tensor:$group_size,
2209    TF_Int32Tensor:$group_key,
2210    TF_Int32Tensor:$instance_key,
2211    Variadic<TF_ResourceTensor>:$ordering_token,
2212
2213    TF_AnyStrAttrOf<["Min", "Max", "Mul", "Add"]>:$merge_op,
2214    TF_AnyStrAttrOf<["Id", "Div"]>:$final_op,
2215    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
2216    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds,
2217    DefaultValuedAttr<I64Attr, "-1">:$max_subdivs_per_device
2218  );
2219
2220  let results = (outs
2221    TF_FpOrI32OrI64Tensor:$data
2222  );
2223
2224  TF_DerivedOperandSizeAttr Nordering_token = TF_DerivedOperandSizeAttr<4>;
2225  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2226}
2227
2228def TF_ComplexOp : TF_Op<"Complex", [NoSideEffect, ResultsBroadcastableShape]> {
2229  let summary = "Converts two real numbers to a complex number.";
2230
2231  let description = [{
2232Given a tensor `real` representing the real part of a complex number, and a
2233tensor `imag` representing the imaginary part of a complex number, this
2234operation returns complex numbers elementwise of the form \\(a + bj\\), where
2235*a* represents the `real` part and *b* represents the `imag` part.
2236
2237The input tensors `real` and `imag` must have the same shape.
2238
2239For example:
2240
2241```
2242# tensor 'real' is [2.25, 3.25]
2243# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [2.25 + 4.75j, 3.25 + 5.75j]
2245```
2246  }];
2247
2248  let arguments = (ins
2249    TF_F32OrF64Tensor:$real,
2250    TF_F32OrF64Tensor:$imag
2251  );
2252
2253  let results = (outs
2254    TensorOf<[TF_Complex128, TF_Complex64]>:$out
2255  );
2256
2257  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2258  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
2259}
2260
2261def TF_ComplexAbsOp : TF_Op<"ComplexAbs", [NoSideEffect, SameOperandsAndResultShape]> {
2262  let summary = "Computes the complex absolute value of a tensor.";
2263
2264  let description = [{
2265Given a tensor `x` of complex numbers, this operation returns a tensor of type
2266`float` or `double` that is the absolute value of each element in `x`. All
2267elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
2268value is computed as \\( \sqrt{a^2 + b^2}\\).
2269
2270For example:
2271
2272>>> x = tf.complex(3.0, 4.0)
2273>>> print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy())
22745.0
2275  }];
2276
2277  let arguments = (ins
2278    TensorOf<[TF_Complex128, TF_Complex64]>:$x
2279  );
2280
2281  let results = (outs
2282    TF_F32OrF64Tensor:$y
2283  );
2284
2285  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2286  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
2287}
2288
2289def TF_ConcatOp : TF_Op<"Concat", [NoSideEffect]> {
2290  let summary = "Concatenates tensors along one dimension.";
2291
2292  let arguments = (ins
2293    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to concatenate.  Must be in the
2294range [0, rank(values)).}]>:$concat_dim,
2295    Arg<Variadic<TF_Tensor>, [{The `N` Tensors to concatenate. Their ranks and types must match,
2296and their sizes must match in all dimensions except `concat_dim`.}]>:$values
2297  );
2298
2299  let results = (outs
2300    Res<TF_Tensor, [{A `Tensor` with the concatenation of values stacked along the
2301`concat_dim` dimension.  This tensor's shape matches that of `values` except
2302in `concat_dim` where it has the sum of the sizes.}]>:$output
2303  );
2304
2305  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;
2306  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
2307
2308  let verifier = [{
2309    return Verify(*this);
2310  }];
2311
2312  let hasCanonicalizer = 1;
2313}
2314
2315def TF_ConcatOffsetOp : TF_Op<"ConcatOffset", [NoSideEffect]> {
2316  let summary = "Computes offsets of concat inputs within its output.";
2317
2318  let description = [{
2319For example:
2320
2321```
2322# 'x' is [2, 2, 7]
2323# 'y' is [2, 3, 7]
2324# 'z' is [2, 5, 7]
concat_offset(1, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
2326```
2327
2328This is typically used by gradient computations for a concat operation.
2329  }];
2330
2331  let arguments = (ins
2332    Arg<TF_Int32Tensor, [{The dimension along which to concatenate.}]>:$concat_dim,
2333    Arg<Variadic<TF_Int32Tensor>, [{The `N` int32 vectors representing shape of tensors being concatenated.}]>:$shape
2334  );
2335
2336  let results = (outs
2337    Res<Variadic<TF_Int32Tensor>, [{The `N` int32 vectors representing the starting offset
2338of input tensors within the concatenated output.}]>:$offset
2339  );
2340
2341  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;
2342
2343  let verifier = [{
2344    return Verify(*this);
2345  }];
2346
2347  let hasFolder = 1;
2348}
2349
2350def TF_ConcatV2Op : TF_Op<"ConcatV2", [NoSideEffect]> {
2351  let summary = "Concatenates tensors along one dimension.";
2352
2353  let arguments = (ins
2354    Arg<Variadic<TF_Tensor>, [{List of `N` Tensors to concatenate. Their ranks and types must match,
2355and their sizes must match in all dimensions except `concat_dim`.}]>:$values,
2356    Arg<TF_I32OrI64Tensor, [{0-D.  The dimension along which to concatenate.  Must be in the
2357range [-rank(values), rank(values)).}]>:$axis
2358  );
2359
2360  let results = (outs
2361    Res<TF_Tensor, [{A `Tensor` with the concatenation of values stacked along the
2362`concat_dim` dimension.  This tensor's shape matches that of `values` except
2363in `concat_dim` where it has the sum of the sizes.}]>:$output
2364  );
2365
2366  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
2367  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2368  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
2369
2370  let verifier = [{
2371    return Verify(*this);
2372  }];
2373
2374  let hasCanonicalizer = 1;
2375}
2376
2377def TF_ConfigureDistributedTPUOp : TF_Op<"ConfigureDistributedTPU", []> {
2378  let summary = [{
2379Sets up the centralized structures for a distributed TPU system.
2380  }];
2381
2382  let arguments = (ins
2383    DefaultValuedAttr<StrAttr, "">:$embedding_config,
2384    DefaultValuedAttr<StrAttr, "">:$tpu_embedding_config,
2385    DefaultValuedAttr<BoolAttr, "false">:$is_global_init,
2386    DefaultValuedAttr<BoolAttr, "false">:$enable_whole_mesh_compilations,
2387    DefaultValuedAttr<BoolAttr, "true">:$compilation_failure_closes_chips
2388  );
2389
2390  let results = (outs
2391    Res<TF_StrTensor, [{A serialized tensorflow.tpu.TopologyProto that describes the TPU
2392topology.}]>:$topology
2393  );
2394}
2395
2396def TF_ConfigureTPUEmbeddingOp : TF_Op<"ConfigureTPUEmbedding", []> {
2397  let summary = "Sets up TPUEmbedding in a distributed TPU system.";
2398
2399  let arguments = (ins
2400    StrAttr:$config
2401  );
2402
2403  let results = (outs);
2404}
2405
2406def TF_ConjOp : TF_Op<"Conj", [Involution, NoSideEffect, SameOperandsAndResultType]> {
2407  let summary = "Returns the complex conjugate of a complex number.";
2408
2409  let description = [{
2410Given a tensor `input` of complex numbers, this operation returns a tensor of
2411complex numbers that are the complex conjugate of each element in `input`. The
2412complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
2413real part and *b* is the imaginary part.
2414
2415The complex conjugate returned by this operation is of the form \\(a - bj\\).
2416
2417For example:
2418
2419```
2420# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
2421tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
2422```
2423  }];
2424
2425  let arguments = (ins
2426    TensorOf<[TF_Complex128, TF_Complex64, TF_Variant]>:$input
2427  );
2428
2429  let results = (outs
2430    TensorOf<[TF_Complex128, TF_Complex64, TF_Variant]>:$output
2431  );
2432
2433  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2434}
2435
2436def TF_ConjugateTransposeOp : TF_Op<"ConjugateTranspose", [NoSideEffect]> {
2437  let summary = [{
2438Shuffle dimensions of x according to a permutation and conjugate the result.
2439  }];
2440
2441  let description = [{
2442The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
2443  `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
2444  `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
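
A minimal sketch via `tf.transpose(..., conjugate=True)` (assumed here to map
to this op):

```python
import tensorflow as tf

x = tf.constant([[1 + 1j, 2 + 2j],
                 [3 + 3j, 4 + 4j]])
print(tf.transpose(x, perm=[1, 0], conjugate=True).numpy())
# [[1.-1.j 3.-3.j]
#  [2.-2.j 4.-4.j]]
```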
2445  }];
2446
2447  let arguments = (ins
2448    TF_Tensor:$x,
2449    TF_I32OrI64Tensor:$perm
2450  );
2451
2452  let results = (outs
2453    TF_Tensor:$y
2454  );
2455
2456  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2457  TF_DerivedOperandTypeAttr Tperm = TF_DerivedOperandTypeAttr<1>;
2458}
2459
2460def TF_Conv2DOp : TF_Op<"Conv2D", [DeclareOpInterfaceMethods<InferTypeOpInterface>, NoSideEffect, TF_LayoutSensitiveInterface]> {
2461  let summary = [{
2462Computes a 2-D convolution given 4-D `input` and `filter` tensors.
2463  }];
2464
2465  let description = [{
2466Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
2467and a filter / kernel tensor of shape
2468`[filter_height, filter_width, in_channels, out_channels]`, this op
2469performs the following:
2470
24711. Flattens the filter to a 2-D matrix with shape
2472   `[filter_height * filter_width * in_channels, output_channels]`.
24732. Extracts image patches from the input tensor to form a *virtual*
2474   tensor of shape `[batch, out_height, out_width,
2475   filter_height * filter_width * in_channels]`.
24763. For each patch, right-multiplies the filter matrix and the image patch
2477   vector.
2478
2479In detail, with the default NHWC format,
2480
2481    output[b, i, j, k] =
2482        sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
2483                        filter[di, dj, q, k]
2484
2485Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
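
A minimal shape-level sketch via the public `tf.nn.conv2d` wrapper (assumed
here to map to this op):

```python
import tensorflow as tf

image = tf.random.normal([1, 5, 5, 1])   # [batch, height, width, channels]
kernel = tf.random.normal([3, 3, 1, 2])  # [fh, fw, in_channels, out_channels]
out = tf.nn.conv2d(image, kernel, strides=[1, 1, 1, 1], padding="VALID")
print(out.shape)  # (1, 3, 3, 2): 5 - 3 + 1 = 3 per spatial dimension
```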
2487  }];
2488
2489  let arguments = (ins
2490    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor. The dimension order is interpreted according to the value
2491of `data_format`, see below for details.}]>:$input,
2492    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor of shape
2493`[filter_height, filter_width, in_channels, out_channels]`}]>:$filter,
2494
2495    I64ArrayAttr:$strides,
2496    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
2497    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
2498    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
2499    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
2500    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
2501  );
2502
2503  let results = (outs
2504    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor. The dimension order is determined by the value of
2505`data_format`, see below for details.}]>:$output
2506  );
2507
2508  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2509
2510  let verifier = [{
2511    return Verify(*this);
2512  }];
2513
2514  let extraClassDeclaration = [{
2515    // TF_LayoutSensitiveInterface:
2516    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
2517    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
2518    StringRef GetOptimalLayout(const RuntimeDevices& devices);
2519    LogicalResult UpdateDataFormat(StringRef data_format);
2520    // InferTypeOpInterface:
2521    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
2522      return ArraysAreCastCompatible(l, r);
2523    }
2524  }];
2525}
2526
2527def TF_Conv2DBackpropFilterOp : TF_Op<"Conv2DBackpropFilter", [NoSideEffect, TF_LayoutSensitiveInterface]> {
2528  let summary = [{
2529Computes the gradients of convolution with respect to the filter.
2530  }];
2531
2532  let arguments = (ins
2533    Arg<TF_FloatTensor, [{4-D with shape `[batch, in_height, in_width, in_channels]`.}]>:$input,
2534    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
2535where `filter` is a 4-D
2536`[filter_height, filter_width, in_channels, out_channels]` tensor.}]>:$filter_sizes,
2537    Arg<TF_FloatTensor, [{4-D with shape `[batch, out_height, out_width, out_channels]`.
2538Gradients w.r.t. the output of the convolution.}]>:$out_backprop,
2539
2540    I64ArrayAttr:$strides,
2541    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
2542    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
2543    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
2544    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
2545    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
2546  );
2547
2548  let results = (outs
2549    Res<TF_FloatTensor, [{4-D with shape
2550`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
2551the `filter` input of the convolution.}]>:$output
2552  );
2553
2554  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2555
2556  let extraClassDeclaration = [{
2557    // TF_LayoutSensitiveInterface:
2558    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0, 2}; }
2559    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {}; }
2560    StringRef GetOptimalLayout(const RuntimeDevices& devices);
2561    LogicalResult UpdateDataFormat(StringRef data_format);
2562  }];
2563}
2564
2565def TF_Conv2DBackpropInputOp : TF_Op<"Conv2DBackpropInput", [NoSideEffect, TF_LayoutSensitiveInterface]> {
2566  let summary = [{
2567Computes the gradients of convolution with respect to the input.
2568  }];
2569
2570  let arguments = (ins
2571    Arg<TF_Int32Tensor, [{An integer vector representing the shape of `input`,
2572where `input` is a 4-D `[batch, height, width, channels]` tensor.}]>:$input_sizes,
2573    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape
2574`[filter_height, filter_width, in_channels, out_channels]`.}]>:$filter,
2575    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape `[batch, out_height, out_width, out_channels]`.
2576Gradients w.r.t. the output of the convolution.}]>:$out_backprop,
2577
2578    I64ArrayAttr:$strides,
2579    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
2580    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
2581    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
2582    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
2583    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
2584  );
2585
2586  let results = (outs
2587    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
2588w.r.t. the input of the convolution.}]>:$output
2589  );
2590
2591  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
2592
2593  let verifier = [{
2594    return Verify(*this);
2595  }];
2596
2597  let extraClassDeclaration = [{
2598    // TF_LayoutSensitiveInterface:
2599    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {2}; }
2600    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
2601    StringRef GetOptimalLayout(const RuntimeDevices& devices);
2602    LogicalResult UpdateDataFormat(StringRef data_format);
2603  }];
2604}
2605
2606def TF_Conv3DOp : TF_Op<"Conv3D", [DeclareOpInterfaceMethods<InferTypeOpInterface>, NoSideEffect]> {
2607  let summary = [{
2608Computes a 3-D convolution given 5-D `input` and `filter` tensors.
2609  }];
2610
2611  let description = [{
2612In signal processing, cross-correlation is a measure of similarity of
2613two waveforms as a function of a time-lag applied to one of them. This
2614is also known as a sliding dot product or sliding inner-product.
2615
2616Our Conv3D implements a form of cross-correlation.
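
A minimal shape-level sketch via the public `tf.nn.conv3d` wrapper (assumed
here to map to this op):

```python
import tensorflow as tf

volume = tf.random.normal([1, 4, 4, 4, 1])  # [batch, depth, h, w, channels]
kernel = tf.random.normal([2, 2, 2, 1, 3])  # [fd, fh, fw, in_ch, out_ch]
out = tf.nn.conv3d(volume, kernel, strides=[1, 1, 1, 1, 1], padding="VALID")
print(out.shape)  # (1, 3, 3, 3, 3): 4 - 2 + 1 = 3 per spatial dimension
```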
2617  }];
2618
2619  let arguments = (ins
2620    Arg<TF_FloatTensor, [{Shape `[batch, in_depth, in_height, in_width, in_channels]`.}]>:$input,
2621    Arg<TF_FloatTensor, [{Shape `[filter_depth, filter_height, filter_width, in_channels,
2622out_channels]`. `in_channels` must match between `input` and `filter`.}]>:$filter,
2623
2624    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
2625    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
2626    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format,
2627    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1, 1}">:$dilations
2628  );
2629
2630  let results = (outs
2631    TF_FloatTensor:$output
2632  );
2633
2634  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2635
2636  let verifier = [{
2637    return Verify(*this);
2638  }];
2639
2640  let extraClassDeclaration = [{
2641    // InferTypeOpInterface:
2642    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
2643      return ArraysAreCastCompatible(l, r);
2644    }
2645  }];
2646
2647}
2648
2649def TF_Conv3DBackpropFilterV2Op : TF_Op<"Conv3DBackpropFilterV2", [NoSideEffect]> {
2650  let summary = [{
2651Computes the gradients of 3-D convolution with respect to the filter.
2652  }];
2653
2654  let arguments = (ins
2655    Arg<TF_FloatTensor, [{Shape `[batch, depth, rows, cols, in_channels]`.}]>:$input,
2656    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
2657where `filter` is a 5-D
2658`[filter_depth, filter_height, filter_width, in_channels, out_channels]`
2659tensor.}]>:$filter_sizes,
2660    Arg<TF_FloatTensor, [{Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
2661out_channels]`.}]>:$out_backprop,
2662
2663    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
2664    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
2665    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format,
2666    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1, 1}">:$dilations
2667  );
2668
2669  let results = (outs
2670    TF_FloatTensor:$output
2671  );
2672
2673  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2674}
2675
2676def TF_Conv3DBackpropInputV2Op : TF_Op<"Conv3DBackpropInputV2", [NoSideEffect]> {
2677  let summary = [{
2678Computes the gradients of 3-D convolution with respect to the input.
2679  }];
2680
2681  let arguments = (ins
2682    Arg<TF_I32OrI64Tensor, [{An integer vector representing the tensor shape of `input`,
2683where `input` is a 5-D
2684`[batch, depth, rows, cols, in_channels]` tensor.}]>:$input_sizes,
2685    Arg<TF_FloatTensor, [{Shape `[depth, rows, cols, in_channels, out_channels]`.
2686`in_channels` must match between `input` and `filter`.}]>:$filter,
2687    Arg<TF_FloatTensor, [{Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
2688out_channels]`.}]>:$out_backprop,
2689
2690    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
2691    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
2692    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format,
2693    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1, 1}">:$dilations
2694  );
2695
2696  let results = (outs
2697    TF_FloatTensor:$output
2698  );
2699
2700  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
2701  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
2702}
2703
2704def TF_CosOp : TF_Op<"Cos", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
2705  let summary = "Computes cos of x element-wise.";
2706
2707  let description = [{
Given an input tensor, this function computes the cosine of every
  element in the tensor. The input range is `(-inf, inf)` and the
  output range is `[-1, 1]`. Non-finite inputs (`inf`, `-inf`, or `nan`)
  return `nan`.
2712
2713  ```python
2714  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
2715  tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]
2716  ```
2717  }];
2718
2719  let arguments = (ins
2720    TF_FpOrComplexTensor:$x
2721  );
2722
2723  let results = (outs
2724    TF_FpOrComplexTensor:$y
2725  );
2726
2727  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2728}
2729
2730def TF_CoshOp : TF_Op<"Cosh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
2731  let summary = "Computes hyperbolic cosine of x element-wise.";
2732
2733  let description = [{
Given an input tensor, this function computes the hyperbolic cosine of every
  element in the tensor. The input range is `[-inf, inf]` and the output
  range is `[1, inf]`.
2737
2738  ```python
2739  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
2740  tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]
2741  ```
2742  }];
2743
2744  let arguments = (ins
2745    TF_FpOrComplexTensor:$x
2746  );
2747
2748  let results = (outs
2749    TF_FpOrComplexTensor:$y
2750  );
2751
2752  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2753}
2754
2755def TF_CrossOp : TF_Op<"Cross", [NoSideEffect, SameOperandsAndResultType]> {
2756  let summary = "Compute the pairwise cross product.";
2757
2758  let description = [{
2759`a` and `b` must be the same shape; they can either be simple 3-element vectors,
2760or any shape where the innermost dimension is 3. In the latter case, each pair
2761of corresponding 3-element vectors is cross-multiplied independently.
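
A minimal sketch via the public `tf.linalg.cross` wrapper (assumed here to
map to this op):

```python
import tensorflow as tf

a = tf.constant([1., 0., 0.])
b = tf.constant([0., 1., 0.])
print(tf.linalg.cross(a, b).numpy())  # [0. 0. 1.]
```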
2762  }];
2763
2764  let arguments = (ins
2765    Arg<TF_IntOrFpTensor, [{A tensor containing 3-element vectors.}]>:$a,
2766    Arg<TF_IntOrFpTensor, [{Another tensor, of same type and shape as `a`.}]>:$b
2767  );
2768
2769  let results = (outs
2770    Res<TF_IntOrFpTensor, [{Pairwise cross product of the vectors in `a` and `b`.}]>:$product
2771  );
2772
2773  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2774}
2775
2776def TF_CrossReplicaSumOp : TF_Op<"CrossReplicaSum", [NoSideEffect, TF_AllTypesMatch<["input", "output"]>, TF_NoConstantFold]> {
2777  let summary = "An Op to sum inputs across replicated TPU instances.";
2778
2779  let description = [{
2780Each instance supplies its own input.
2781
2782For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.
2783Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,
2784and `B, D, F, H` as group 1. Thus we get the outputs:
2785`[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.
2786  }];
2787
2788  let arguments = (ins
2789    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Uint32]>, [{The local input to the sum.}]>:$input,
2790    Arg<TF_Int32Tensor, [{An int32 tensor with shape
2791[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
2792replica ids in the ith subgroup.}]>:$group_assignment
2793  );
2794
2795  let results = (outs
2796    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Uint32]>, [{The sum of all the distributed inputs.}]>:$output
2797  );
2798
2799  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2800}
2801
2802def TF_CumprodOp : TF_Op<"Cumprod", [NoSideEffect, TF_AllTypesMatch<["x", "out"]>]> {
2803  let summary = [{
2804Compute the cumulative product of the tensor `x` along `axis`.
2805  }];
2806
2807  let description = [{
2808By default, this op performs an inclusive cumprod, which means that the first
2809element of the input is identical to the first element of the output:
2810
2811```python
2812tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
2813```
2814
2815By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
2816performed instead:
2817
2818```python
2819tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
2820```
2821
2822By setting the `reverse` kwarg to `True`, the cumprod is performed in the
2823opposite direction:
2824
2825```python
2826tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
2827```
2828
2829This is more efficient than using separate `tf.reverse` ops.
2830
2831The `reverse` and `exclusive` kwargs can also be combined:
2832
2833```python
2834tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
2835```
2836  }];
2837
2838  let arguments = (ins
2839    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`. Must be one of the following types: `float32`, `float64`,
2840`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
2841`complex128`, `qint8`, `quint8`, `qint32`, `half`.}]>:$x,
2842    Arg<TF_I32OrI64Tensor, [{A `Tensor` of type `int32` (default: 0). Must be in the range
2843`[-rank(x), rank(x))`.}]>:$axis,
2844
2845    DefaultValuedAttr<BoolAttr, "false">:$exclusive,
2846    DefaultValuedAttr<BoolAttr, "false">:$reverse
2847  );
2848
2849  let results = (outs
2850    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$out
2851  );
2852
2853  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2854  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
2855
2856  let verifier = [{
2857    return Verify(*this);
2858  }];
2859}
2860
2861def TF_CumsumOp : TF_Op<"Cumsum", [NoSideEffect, TF_AllTypesMatch<["x", "out"]>]> {
2862  let summary = "Compute the cumulative sum of the tensor `x` along `axis`.";
2863
2864  let description = [{
2865By default, this op performs an inclusive cumsum, which means that the first
2866element of the input is identical to the first element of the output:
2867
2868```python
2869tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
2870```
2871
2872By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
2873performed instead:
2874
2875```python
2876tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
2877```
2878
2879By setting the `reverse` kwarg to `True`, the cumsum is performed in the
2880opposite direction:
2881
2882```python
2883tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
2884```
2885
2886This is more efficient than using separate `tf.reverse` ops.
2887
2888The `reverse` and `exclusive` kwargs can also be combined:
2889
2890```python
2891tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
2892```
2893  }];
2894
2895  let arguments = (ins
2896    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`. Must be one of the following types: `float32`, `float64`,
2897`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
2898`complex128`, `qint8`, `quint8`, `qint32`, `half`.}]>:$x,
2899    Arg<TF_I32OrI64Tensor, [{A `Tensor` of type `int32` (default: 0). Must be in the range
2900`[-rank(x), rank(x))`.}]>:$axis,
2901
2902    DefaultValuedAttr<BoolAttr, "false">:$exclusive,
2903    DefaultValuedAttr<BoolAttr, "false">:$reverse
2904  );
2905
2906  let results = (outs
2907    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$out
2908  );
2909
2910  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2911  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
2912
2913  let verifier = [{
2914    return Verify(*this);
2915  }];
2916}
2917
2918def TF_DataFormatDimMapOp : TF_Op<"DataFormatDimMap", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Returns the dimension index in the destination data format given the one in
the source data format.
  }];
2926
2927  let arguments = (ins
2928    Arg<TF_I32OrI64Tensor, [{A Tensor with each element as a dimension index in source data format.
2929Must be in the range [-4, 4).}]>:$x,
2930
2931    DefaultValuedAttr<StrAttr, "NHWC">:$src_format,
2932    DefaultValuedAttr<StrAttr, "NCHW">:$dst_format
2933  );
2934
2935  let results = (outs
2936    Res<TF_I32OrI64Tensor, [{A Tensor with each element as a dimension index in destination data format.}]>:$y
2937  );
2938
2939  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2940}
2941
2942def TF_DataFormatVecPermuteOp : TF_Op<"DataFormatVecPermute", [NoSideEffect, SameOperandsAndResultType]> {
2943  let summary = "Permute input tensor from `src_format` to `dst_format`.";
2944
2945  let description = [{
2946Input tensor must be a vector of size 4, or a 4x2 tensor.
2947
2948For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs:
2949```
2950[1, 2, 3, 4]
2951```
2952and
2953```
2954[[1, 2, 3, 4],
2955 [5, 6, 7, 8]]
2956```
the outputs will be (respectively):
2958```
2959[1, 4, 2, 3]
2960```
2961and
2962```
2963[[1, 4, 2, 3],
2964 [5, 8, 6, 7]]
2965```
2966  }];
2967
2968  let arguments = (ins
2969    Arg<TF_I32OrI64Tensor, [{Vector of size 4 or Tensor of shape (4, 2) in source data format.}]>:$x,
2970
2971    DefaultValuedAttr<StrAttr, "NHWC">:$src_format,
2972    DefaultValuedAttr<StrAttr, "NCHW">:$dst_format
2973  );
2974
2975  let results = (outs
2976    Res<TF_I32OrI64Tensor, [{Vector of size 4 or Tensor of shape (4, 2) in destination data format.}]>:$y
2977  );
2978
2979  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
2980
2981  let verifier = [{ return Verify(*this); }];
2982}
2983
2984def TF_DebugIdentityV2Op : TF_Op<"DebugIdentityV2", []> {
2985  let summary = "Debug Identity V2 Op.";
2986
2987  let description = [{
2988Provides an identity mapping from input to output, while writing the content of
2989the input tensor by calling DebugEventsWriter.
2990
The semantics of the input tensor depend on tensor_debug_mode. In typical
2992usage, the input tensor comes directly from the user computation only when
tensor_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a
list of all the possible values of tensor_debug_mode). For the other debug modes,
2995the input tensor should be produced by an additional op or subgraph that
2996computes summary information about one or more tensors.
2997  }];
2998
2999  let arguments = (ins
3000    Arg<TF_Tensor, [{Input tensor, non-Reference type}]>:$input,
3001
3002    DefaultValuedAttr<StrAttr, "">:$tfdbg_context_id,
3003    DefaultValuedAttr<StrAttr, "">:$op_name,
3004    DefaultValuedAttr<I64Attr, "-1">:$output_slot,
3005    DefaultValuedAttr<I64Attr, "-1">:$tensor_debug_mode,
3006    DefaultValuedAttr<StrArrayAttr, "{}">:$debug_urls,
3007    DefaultValuedAttr<I64Attr, "1000">:$circular_buffer_size,
3008    DefaultValuedAttr<StrAttr, "">:$tfdbg_run_id
3009  );
3010
3011  let results = (outs
3012    TF_Tensor:$output
3013  );
3014
3015  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
3016}
3017
def TF_DecodeAndCropJpegOp : TF_Op<"DecodeAndCropJpeg", [NoSideEffect]> {
  let summary = "Decode and crop a JPEG-encoded image to a uint8 tensor.";

  let description = [{
The attr `channels` indicates the desired number of color channels for the
decoded image.

Accepted values are:

*   0: Use the number of channels in the JPEG-encoded image.
*   1: output a grayscale image.
*   3: output an RGB image.

If needed, the JPEG-encoded image is transformed to match the requested number
of color channels.

The attr `ratio` allows downscaling the image by an integer factor during
decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
downscaling the image later.

It is equivalent to a combination of decode and crop, but much faster because
it only decodes the part of the JPEG image within the crop window.
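
A minimal usage sketch via the `tf.io.decode_and_crop_jpeg` wrapper (the file
path is hypothetical):

```python
import tensorflow as tf

contents = tf.io.read_file("/path/to/image.jpg")  # hypothetical path
# crop_window is [crop_y, crop_x, crop_height, crop_width].
patch = tf.io.decode_and_crop_jpeg(contents, crop_window=[10, 10, 64, 64],
                                   channels=3)
# patch is a uint8 tensor of shape [64, 64, 3].
```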
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{0-D.  The JPEG-encoded image.}]>:$contents,
    Arg<TF_Int32Tensor, [{1-D.  The crop window: [crop_y, crop_x, crop_height, crop_width].}]>:$crop_window,

    DefaultValuedAttr<I64Attr, "0">:$channels,
    DefaultValuedAttr<I64Attr, "1">:$ratio,
    DefaultValuedAttr<BoolAttr, "true">:$fancy_upscaling,
    DefaultValuedAttr<BoolAttr, "false">:$try_recover_truncated,
    DefaultValuedAttr<F32Attr, "1.0f">:$acceptable_fraction,
    DefaultValuedAttr<StrAttr, "">:$dct_method
  );

  let results = (outs
    Res<TF_Uint8Tensor, [{3-D with shape `[height, width, channels]`.}]>:$image
  );
}

def TF_DecodeGifOp : TF_Op<"DecodeGif", [NoSideEffect]> {
  let summary = "Decode the frame(s) of a GIF-encoded image to a uint8 tensor.";

  let description = [{
GIF images with frame or transparency compression are not supported.
On Linux and macOS systems, convert animated GIFs from compressed to
uncompressed by running:

    convert $src.gif -coalesce $dst.gif

This op also supports decoding JPEGs and PNGs, though it is cleaner to use
`tf.io.decode_image`.
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{0-D.  The GIF-encoded image.}]>:$contents
  );

  let results = (outs
    Res<TF_Uint8Tensor, [{4-D with shape `[num_frames, height, width, 3]`. RGB channel order.}]>:$image
  );
}

def TF_DecodeJpegOp : TF_Op<"DecodeJpeg", [NoSideEffect]> {
  let summary = "Decode a JPEG-encoded image to a uint8 tensor.";

  let description = [{
The attr `channels` indicates the desired number of color channels for the
decoded image.

Accepted values are:

*   0: Use the number of channels in the JPEG-encoded image.
*   1: output a grayscale image.
*   3: output an RGB image.

If needed, the JPEG-encoded image is transformed to match the requested number
of color channels.

The attr `ratio` allows downscaling the image by an integer factor during
decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
downscaling the image later.

This op also supports decoding PNGs and non-animated GIFs since the interface is
the same, though it is cleaner to use `tf.io.decode_image`.
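
A minimal sketch of decoding with an integer downscaling factor, via the
`tf.io.decode_jpeg` wrapper (the file path is hypothetical):

```python
import tensorflow as tf

contents = tf.io.read_file("/path/to/image.jpg")  # hypothetical path
# ratio=2 halves both height and width during decoding.
image = tf.io.decode_jpeg(contents, channels=3, ratio=2)
```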
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{0-D.  The JPEG-encoded image.}]>:$contents,

    DefaultValuedAttr<I64Attr, "0">:$channels,
    DefaultValuedAttr<I64Attr, "1">:$ratio,
    DefaultValuedAttr<BoolAttr, "true">:$fancy_upscaling,
    DefaultValuedAttr<BoolAttr, "false">:$try_recover_truncated,
    DefaultValuedAttr<F32Attr, "1.0f">:$acceptable_fraction,
    DefaultValuedAttr<StrAttr, "">:$dct_method
  );

  let results = (outs
    Res<TF_Uint8Tensor, [{3-D with shape `[height, width, channels]`.}]>:$image
  );
}

def TF_DecodePaddedRawOp : TF_Op<"DecodePaddedRaw", [NoSideEffect]> {
  let summary = "Reinterpret the bytes of a string as a vector of numbers.";
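
  let description = [{
A minimal sketch, assuming the `tf.io.decode_padded_raw` wrapper; note that
`fixed_length` is measured in bytes, so 4 bytes decode to two `uint16` elements:

```python
import tensorflow as tf

raw = tf.constant([b"\x01\x00\x02\x00"])  # little-endian uint16 bytes
out = tf.io.decode_padded_raw(raw, fixed_length=4, out_type=tf.uint16)
# out ==> [[1, 2]] -- one more dimension than the input.
```
  }];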

  let arguments = (ins
    Arg<TF_StrTensor, [{Tensor of string to be decoded.}]>:$input_bytes,
    Arg<TF_Int32Tensor, [{Length in bytes for each element of the decoded output. Must be a multiple
of the size of the output type.}]>:$fixed_length,

    DefaultValuedAttr<BoolAttr, "true">:$little_endian
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{A Tensor with one more dimension than the input `input_bytes`. The added dimension
will have size equal to the length of the elements of `input_bytes` divided by the
number of bytes to represent `out_type`.}]>:$output
  );

  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
}

def TF_DecodePngOp : TF_Op<"DecodePng", [NoSideEffect]> {
  let summary = "Decode a PNG-encoded image to a uint8 or uint16 tensor.";

  let description = [{
The attr `channels` indicates the desired number of color channels for the
decoded image.

Accepted values are:

*   0: Use the number of channels in the PNG-encoded image.
*   1: output a grayscale image.
*   3: output an RGB image.
*   4: output an RGBA image.

If needed, the PNG-encoded image is transformed to match the requested number
of color channels.

This op also supports decoding JPEGs and non-animated GIFs since the interface
is the same, though it is cleaner to use `tf.io.decode_image`.
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{0-D.  The PNG-encoded image.}]>:$contents,

    DefaultValuedAttr<I64Attr, "0">:$channels
  );

  let results = (outs
    Res<TensorOf<[TF_Uint16, TF_Uint8]>, [{3-D with shape `[height, width, channels]`.}]>:$image
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_DeleteIteratorOp : TF_Op<"DeleteIterator", []> {
  let summary = "A container for an iterator resource.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{A handle to the iterator to delete.}], [TF_DatasetIteratorFree]>:$handle,
    Arg<TF_VariantTensor, [{A variant deleter.}]>:$deleter
  );

  let results = (outs);
}

def TF_DeleteMemoryCacheOp : TF_Op<"DeleteMemoryCache", []> {
  let summary = "";

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_DatasetMemoryCacheFree]>:$handle,
    TF_VariantTensor:$deleter
  );

  let results = (outs);
}

def TF_DeleteMultiDeviceIteratorOp : TF_Op<"DeleteMultiDeviceIterator", []> {
  let summary = "A container for an iterator resource.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{A handle to the multi device iterator to delete.}], [TF_DatasetIteratorFree]>:$multi_device_iterator,
    Arg<Variadic<TF_ResourceTensor>, [{A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.}], [TF_DatasetIteratorRead]>:$iterators,
    Arg<TF_VariantTensor, [{A variant deleter.}]>:$deleter
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;
}

def TF_DeleteRandomSeedGeneratorOp : TF_Op<"DeleteRandomSeedGenerator", []> {
  let summary = "";

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorFree]>:$handle,
    TF_VariantTensor:$deleter
  );

  let results = (outs);
}

def TF_DeleteSeedGeneratorOp : TF_Op<"DeleteSeedGenerator", []> {
  let summary = "";

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorFree]>:$handle,
    TF_VariantTensor:$deleter
  );

  let results = (outs);
}

def TF_DepthToSpaceOp : TF_Op<"DepthToSpace", [NoSideEffect]> {
  let summary = "DepthToSpace for tensors of type T.";

  let description = [{
Rearranges data from depth into blocks of spatial data.
This is the reverse transformation of SpaceToDepth. More specifically,
this op outputs a copy of the input tensor where values from the `depth`
dimension are moved in spatial blocks to the `height` and `width` dimensions.
The attr `block_size` indicates the input block size and how the data is moved.

  * Chunks of data of size `block_size * block_size` from depth are rearranged
    into non-overlapping blocks of size `block_size x block_size`
  * The width of the output tensor is `input_width * block_size`, whereas the
    height is `input_height * block_size`.
  * The Y, X coordinates within each block of the output image are determined
    by the high order component of the input channel index.
  * The depth of the input tensor must be divisible by
    `block_size * block_size`.

The `data_format` attr specifies the layout of the input and output tensors
with the following options:
  "NHWC": `[ batch, height, width, channels ]`
  "NCHW": `[ batch, channels, height, width ]`
  "NCHW_VECT_C":
      `qint8 [ batch, channels / 4, height, width, 4 ]`

It is useful to consider the operation as transforming a 6-D Tensor.
e.g. for data_format = NHWC,
     Each element in the input tensor can be specified via 6 coordinates,
     ordered by decreasing memory layout significance as:
     n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
                        within the input image, bX, bY means coordinates
                        within the output block, oC means output channels).
     The output would be the input transposed to the following layout:
     n,iY,bY,iX,bX,oC

This operation is useful for resizing the activations between convolutions
(but keeping all data), e.g. instead of pooling. It is also useful for training
purely convolutional models.

For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
block_size = 2:

```
x = [[[[1, 2, 3, 4]]]]

```

This operation will output a tensor of shape `[1, 2, 2, 1]`:

```
   [[[[1], [2]],
     [[3], [4]]]]
```

Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`;
the corresponding output will have 2x2 elements and a depth of 1 channel
(1 = `4 / (block_size * block_size)`).
The output element shape is `[2, 2, 1]`.

For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.

```
x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
```

This operation, for a block size of 2, will return the following tensor of shape
`[1, 2, 2, 3]`

```
   [[[[1, 2, 3], [4, 5, 6]],
     [[7, 8, 9], [10, 11, 12]]]]

```

Similarly, for the following input of shape `[1, 2, 2, 4]`, and a block size of 2:

```
x =  [[[[1, 2, 3, 4],
       [5, 6, 7, 8]],
      [[9, 10, 11, 12],
       [13, 14, 15, 16]]]]
```

the operator will return the following tensor of shape `[1, 4, 4, 1]`:

```
x = [[[ [1],   [2],  [5],  [6]],
      [ [3],   [4],  [7],  [8]],
      [ [9],  [10], [13],  [14]],
      [ [11], [12], [15],  [16]]]]

```
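
A runnable sketch using the `tf.nn.depth_to_space` wrapper:

```python
import tensorflow as tf

x = tf.reshape(tf.range(1, 13), [1, 1, 1, 12])
y = tf.nn.depth_to_space(x, block_size=2)
# y has shape [1, 2, 2, 3], matching the example above.
```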
  }];

  let arguments = (ins
    TF_Tensor:$input,

    Confined<I64Attr, [IntMinValue<2>]>:$block_size,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DepthwiseConv2dNativeOp : TF_Op<"DepthwiseConv2dNative", [NoSideEffect]> {
  let summary = [{
Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
  }];

  let description = [{
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
a different filter to each input channel (expanding from 1 channel to
`channel_multiplier` channels for each), then concatenates the results
together. Thus, the output has `in_channels * channel_multiplier` channels.

```
for k in 0..in_channels-1
  for q in 0..channel_multiplier-1
    output[b, i, j, k * channel_multiplier + q] =
      sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
                        filter[di, dj, k, q]
```

Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
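
A shape-level sketch via the `tf.nn.depthwise_conv2d` wrapper:

```python
import tensorflow as tf

x = tf.random.normal([1, 8, 8, 3])   # NHWC input, in_channels = 3
w = tf.random.normal([3, 3, 3, 2])   # channel_multiplier = 2
y = tf.nn.depthwise_conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
# y.shape == [1, 8, 8, 6]  (3 input channels * channel_multiplier 2)
```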
  }];

  let arguments = (ins
    TF_FloatTensor:$input,
    TF_FloatTensor:$filter,

    I64ArrayAttr:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DepthwiseConv2dNativeBackpropFilterOp : TF_Op<"DepthwiseConv2dNativeBackpropFilter", [NoSideEffect]> {
  let summary = [{
Computes the gradients of depthwise convolution with respect to the filter.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.  For example, if
`data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
in_width, in_channels]` tensor.}]>:$input,
    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.}]>:$filter_sizes,
    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.
For example, if `data_format` is 'NHWC' then
out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.}]>:$out_backprop,

    I64ArrayAttr:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    Res<TF_FloatTensor, [{4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
the `filter` input of the convolution.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DepthwiseConv2dNativeBackpropInputOp : TF_Op<"DepthwiseConv2dNativeBackpropInput", [NoSideEffect]> {
  let summary = [{
Computes the gradients of depthwise convolution with respect to the input.
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{An integer vector representing the shape of `input`, based
on `data_format`.  For example, if `data_format` is 'NHWC' then
`input` is a 4-D `[batch, height, width, channels]` tensor.}]>:$input_sizes,
    Arg<TF_FloatTensor, [{4-D with shape
`[filter_height, filter_width, in_channels, depthwise_multiplier]`.}]>:$filter,
    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.
For example, if `data_format` is 'NHWC' then
out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.}]>:$out_backprop,

    I64ArrayAttr:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    Res<TF_FloatTensor, [{4-D with shape according to `data_format`.  For example, if
`data_format` is 'NHWC', output shape is `[batch, in_height,
in_width, in_channels]`.  Gradient w.r.t. the input of the
convolution.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_DequantizeOp : TF_Op<"Dequantize", [NoSideEffect]> {
  let summary = [{
Dequantize the 'input' tensor into a float or bfloat16 Tensor.
  }];

  let description = [{
[min_range, max_range] are scalar floats that specify the range for
the output. The 'mode' attribute controls exactly which calculations are
used to convert the float values to their quantized equivalents.

In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:

```
if T == qint8: in[i] += (range(T) + 1) / 2.0
out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
```
where `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`.

*MIN_COMBINED Mode Example*

If the input comes from a QuantizedRelu6, the output type is
quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
Dequantize on quint8 will take each value, cast to float, and multiply
by 6 / 255.
Note that if quantizedtype is qint8, the operation will additionally add
128 to each value prior to casting.

If the mode is 'MIN_FIRST', then this approach is used:

```c++
num_discrete_values = 1 << (# of bits in T)
range_adjust = num_discrete_values / (num_discrete_values - 1)
range = (range_max - range_min) * range_adjust
range_scale = range / num_discrete_values
lowest_quantized = numeric_limits<T>::min()
result = range_min + ((input - lowest_quantized) * range_scale)
```

If the mode is `SCALED`, dequantization is performed by multiplying each
input value by a scaling_factor. (Thus an input of 0 always maps to 0.0).

The scaling_factor is determined from `min_range`, `max_range`, and
`narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}`
and `QuantizeV2`, using the following algorithm:

```c++

  const int min_expected_T = std::numeric_limits<T>::min() +
    (narrow_range ? 1 : 0);
  const int max_expected_T = std::numeric_limits<T>::max();

  const float scale_factor =
    (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T)
                                         : std::max(min_range / min_expected_T,
                                                    max_range / max_expected_T);
```
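
As a worked example for `SCALED` mode with `T = qint8`, `min_range = -1.0`,
`max_range = 1.0`, and `narrow_range = false`: `min_expected_T = -128` and
`max_expected_T = 127`, so `scale_factor = max(-1.0 / -128, 1.0 / 127) = 1 / 127`,
and a quantized input of 64 dequantizes to `64 / 127`, approximately `0.504`.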
  }];

  let arguments = (ins
    TensorOf<[TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8]>:$input,
    Arg<TF_Float32Tensor, [{The minimum scalar value possibly produced for the input.}]>:$min_range,
    Arg<TF_Float32Tensor, [{The maximum scalar value possibly produced for the input.}]>:$max_range,

    DefaultValuedAttr<TF_AnyStrAttrOf<["MIN_COMBINED", "MIN_FIRST", "SCALED"]>, "MIN_COMBINED">:$mode,
    DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
    DefaultValuedAttr<I64Attr, "-1">:$axis
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Float32]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_DeserializeIteratorOp : TF_Op<"DeserializeIterator", []> {
  let summary = [{
Converts the given variant tensor to an iterator and stores it in the given resource.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorWrite]>:$resource_handle,
    Arg<TF_VariantTensor, [{A variant tensor storing the state of the iterator contained in the
resource.}]>:$serialized
  );

  let results = (outs);
}

def TF_DeserializeSparseOp : TF_Op<"DeserializeSparse", [NoSideEffect]> {
  let summary = "Deserialize `SparseTensor` objects.";

  let description = [{
The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
the last dimension stores serialized `SparseTensor` objects and the other N
dimensions (N >= 0) correspond to a batch. The ranks of the original
`SparseTensor` objects must all match. When the final `SparseTensor` is
created, its rank is the rank of the incoming `SparseTensor` objects plus N;
the sparse tensors have been concatenated along new dimensions, one for each
batch.

The output `SparseTensor` object's shape values for the original dimensions
are the max across the input `SparseTensor` objects' shape values for the
corresponding dimensions. The new dimensions match the size of the batch.

The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order.  If this is not the case, after this
step run `SparseReorder` to restore index ordering.

For example, if the serialized input is a `[2 x 3]` matrix representing two
original `SparseTensor` objects:

    index = [ 0]
            [10]
            [20]
    values = [1, 2, 3]
    shape = [50]

and

    index = [ 2]
            [10]
    values = [4, 5]
    shape = [30]

then the final deserialized `SparseTensor` will be:

    index = [0  0]
            [0 10]
            [0 20]
            [1  2]
            [1 10]
    values = [1, 2, 3, 4, 5]
    shape = [2 50]
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Str, TF_Variant]>, [{The serialized `SparseTensor` objects. The last dimension
must have 3 columns.}]>:$serialized_sparse
  );

  let results = (outs
    TF_Int64Tensor:$sparse_indices,
    TF_Tensor:$sparse_values,
    TF_Int64Tensor:$sparse_shape
  );

  TF_DerivedOperandTypeAttr Tserialized = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<1>;
}

def TF_DestroyResourceOp : TF_Op<"DestroyResourceOp", []> {
  let summary = "Deletes the resource specified by the handle.";

  let description = [{
All subsequent operations using the resource will result in a NotFound
error status.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource to delete.}]>:$resource,

    DefaultValuedAttr<BoolAttr, "true">:$ignore_lookup_error
  );

  let results = (outs);
}

def TF_DeviceIndexOp : TF_Op<"DeviceIndex", [NoSideEffect]> {
  let summary = "Returns the index of the device the op runs on.";

  let description = [{
Given a list of device names, this operation returns the index of the device
on which this op runs. The length of the list is returned in two cases:
(1) the device does not exist in the given device list;
(2) the op is being compiled for XLA.
  }];

  let arguments = (ins
    StrArrayAttr:$device_names
  );

  let results = (outs
    TF_Int32Tensor:$index
  );
}

def TF_DiagOp : TF_Op<"Diag", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Returns a diagonal tensor with given diagonal values.";

  let description = [{
Given a `diagonal`, this operation returns a tensor with the `diagonal` and
everything else padded with zeros. The diagonal is computed as follows:

Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:

`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.

For example:

```
# 'diagonal' is [1, 2, 3, 4]
tf.diag(diagonal) ==> [[1, 0, 0, 0]
                       [0, 2, 0, 0]
                       [0, 0, 3, 0]
                       [0, 0, 0, 4]]
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Rank k tensor where k is at most 1.}]>:$diagonal
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DiagPartOp : TF_Op<"DiagPart", [NoSideEffect]> {
  let summary = "Returns the diagonal part of the tensor.";

  let description = [{
This operation returns a tensor with the `diagonal` part
of the `input`. The `diagonal` part is computed as follows:

Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
tensor of rank `k` with dimensions `[D1,..., Dk]` where:

`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.

For example:

```
# 'input' is [[1, 0, 0, 0]
              [0, 2, 0, 0]
              [0, 0, 3, 0]
              [0, 0, 0, 4]]

tf.diag_part(input) ==> [1, 2, 3, 4]
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Rank k tensor where k is even and not zero.}]>:$input
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The extracted diagonal.}]>:$diagonal
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DigammaOp : TF_Op<"Digamma", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Computes Psi, the derivative of Lgamma (the log of the absolute value of
`Gamma(x)`), element-wise.
  }];

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DivOp : TF_Op<"Div", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
               WithBroadcastableBinOpBuilder {
  let summary = "Returns x / y element-wise.";

  let description = [{
*NOTE*: `Div` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;

  let hasFolder = 1;
}

def TF_DivNoNanOp : TF_Op<"DivNoNan", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                    WithBroadcastableBinOpBuilder {
  let summary = "Returns x / y element-wise; returns 0 if the denominator is zero.";

  let description = [{
*NOTE*: `DivNoNan` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
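
A small sketch via the `tf.math.divide_no_nan` wrapper:

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0])
y = tf.constant([0.0, 4.0])
tf.math.divide_no_nan(x, y)  # ==> [0.0, 0.5]
```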
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x,
    TF_FpOrComplexTensor:$y
  );

  let results = (outs
    TF_FpOrComplexTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_DummyMemoryCacheOp : TF_Op<"DummyMemoryCache", []> {
  let summary = "";

  let arguments = (ins);

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetMemoryCacheAlloc]>:$handle
  );
}

def TF_DummySeedGeneratorOp : TF_Op<"DummySeedGenerator", []> {
  let summary = "";

  let arguments = (ins);

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorAlloc]>:$handle
  );
}

def TF_DynamicStitchOp : TF_Op<"DynamicStitch", [NoSideEffect, SameVariadicOperandSize]> {
  let summary = [{
Interleave the values from the `data` tensors into a single tensor.
  }];

  let description = [{
Builds a merged tensor such that

```python
    merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
```

For example, if each `indices[m]` is scalar or vector, we have

```python
    # Scalar indices:
    merged[indices[m], ...] = data[m][...]

    # Vector indices:
    merged[indices[m][i], ...] = data[m][i, ...]
```

Each `data[i].shape` must start with the corresponding `indices[i].shape`,
and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
must have `data[i].shape = indices[i].shape + constant`.  In terms of this
`constant`, the output shape is

    merged.shape = [max(indices)] + constant

Values are merged in order, so if an index appears in both `indices[m][i]` and
`indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
merged result. If you do not need this guarantee, ParallelDynamicStitch might
perform better on some devices.

For example:

```python
    indices[0] = 6
    indices[1] = [4, 1]
    indices[2] = [[5, 2], [0, 3]]
    data[0] = [61, 62]
    data[1] = [[41, 42], [11, 12]]
    data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
    merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
              [51, 52], [61, 62]]
```

This method can be used to merge partitions created by `dynamic_partition`
as illustrated in the following example:

```python
    # Apply a function (increments x_i) to elements for which a certain
    # condition holds (x_i != -1 in this example).
    x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
    condition_mask = tf.not_equal(x, tf.constant(-1.))
    partitioned_data = tf.dynamic_partition(
        x, tf.cast(condition_mask, tf.int32), 2)
    partitioned_data[1] = partitioned_data[1] + 1.0
    condition_indices = tf.dynamic_partition(
        tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
    x = tf.dynamic_stitch(condition_indices, partitioned_data)
    # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
    # unchanged.
```

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
</div>
  }];

  let arguments = (ins
    Variadic<TF_Int32Tensor>:$indices,
    Variadic<TF_Tensor>:$data
  );

  let results = (outs
    TF_Tensor:$merged
  );

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_EinsumOp : TF_Op<"Einsum", [NoSideEffect]> {
  let summary = [{
Tensor contraction according to Einstein summation convention.
  }];

  let description = [{
Implements generalized Tensor contraction and reduction. Each input Tensor must
have a corresponding input subscript appearing in the comma-separated left-hand
side of the equation. The right-hand side of the equation consists of the
output subscript. The input subscripts and the output subscript should consist
of zero or more named axis labels and at most one ellipsis (`...`).

The named axis labels may be any single character other than those having
special meaning, namely `,.->`. The behavior of this Op is undefined if it
receives an ill-formatted equation; since the validation is done at
graph-building time, we omit format validation checks at runtime.

Note: This Op is *not* intended to be called by the user; instead users should
call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`.

Operations are applied to the input(s) according to the following rules:

 (a) Generalized Diagonals: For input dimensions corresponding to axis labels
     appearing more than once in the same input subscript, we take the
     generalized (`k`-dimensional) diagonal.
     For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the
     generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`,
     `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`.

 (b) Reduction: Axes corresponding to labels appearing only in one input
     subscript but not in the output subscript are summed over prior to Tensor
     contraction.
     For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are
     the reduction axis labels.

 (c) Batch Dimensions: Axes corresponding to labels appearing in each of the
     input subscripts and also in the output subscript make up the batch
     dimensions in Tensor contraction. Unnamed axis labels corresponding to
     ellipsis (`...`) also correspond to batch dimensions.
     For example, for the equation denoting batch matrix multiplication,
     `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension.

 (d) Contraction: In case of binary einsum, axes corresponding to labels
     appearing in two different inputs (and not in the output) are contracted
     against each other.
     Considering the batch matrix multiplication equation again
     (`bij,bjk->bik`), the contracted axis label is `j`.

 (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis
     labels, the opposite operation of (a) is applied. For example, in the
     equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]`
     are all zeros, except for the (generalized) diagonal which is populated
     with values from the input.
     Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is
     provided to enable computing the symbolic gradient of `tf.einsum`.

The output subscripts must contain only labels appearing in at least one of the
input subscripts. Furthermore, all dimensions mapping to the same axis label
must be equal.

Any of the input and output subscripts may contain at most a single ellipsis
(`...`). These ellipses are mapped against dimensions not corresponding to any
named axis label. If two inputs contain ellipsis, then they are broadcast
according to standard NumPy broadcasting
[rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).

The broadcasted dimensions are placed in the corresponding location of the
ellipsis in the output subscript. If the broadcasted dimensions are non-empty
and the output subscripts do not contain ellipsis, then an InvalidArgument error
is raised.

@compatibility(numpy)
Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html).

Comparison with `numpy.einsum`:

 * This Op only supports unary and binary forms of `numpy.einsum`.
 * This Op does not support implicit form. (i.e. equations without `->`).
 * This Op also supports repeated indices in the output subscript, which is not
   supported by `numpy.einsum`.
@end_compatibility
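
A small batch-matmul sketch via the public `tf.einsum` wrapper:

```python
import tensorflow as tf

a = tf.random.normal([2, 3, 4])
b = tf.random.normal([2, 4, 5])
# Batch over b, contract over j: bij,bjk->bik.
c = tf.einsum("bij,bjk->bik", a, b)
# c.shape == [2, 3, 5]
```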
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{List of 1 or 2 Tensors.}]>:$inputs,

    StrAttr:$equation
  );

  let results = (outs
    Res<TF_Tensor, [{Output Tensor with shape depending upon `equation`.}]>:$output
  );

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_EluOp : TF_Op<"Elu", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the exponential linear function.";

  let description = [{
The ELU function is defined as:

 * $ e ^ x - 1 $ if $ x < 0 $
 * $ x $ if $ x >= 0 $

Examples:

>>> tf.nn.elu(1.0)
<tf.Tensor: shape=(), dtype=float32, numpy=1.0>
>>> tf.nn.elu(0.0)
<tf.Tensor: shape=(), dtype=float32, numpy=0.0>
>>> tf.nn.elu(-1000.0)
<tf.Tensor: shape=(), dtype=float32, numpy=-1.0>

See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)
](http://arxiv.org/abs/1511.07289)
  }];

  let arguments = (ins
    TF_FloatTensor:$features
  );

  let results = (outs
    TF_FloatTensor:$activations
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_EluGradOp : TF_Op<"EluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Computes gradients for the exponential linear (Elu) operation.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding Elu operation.}]>:$gradients,
    Arg<TF_FloatTensor, [{The outputs of the corresponding Elu operation.}]>:$outputs
  );

  let results = (outs
    Res<TF_FloatTensor, [{The gradients: `gradients * (outputs + 1)` if outputs < 0,
`gradients` otherwise.}]>:$backprops
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_EmptyOp : TF_Op<"Empty", []> {
  let summary = "Creates a tensor with the given shape.";

  let description = [{
This operation creates a tensor of `shape` and `dtype`.
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{1-D. Represents the shape of the output tensor.}]>:$shape,

    DefaultValuedAttr<BoolAttr, "false">:$init
  );

  let results = (outs
    Res<TF_Tensor, [{A `Tensor` of type `T`.}]>:$output
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;

  let hasFolder = 1;
}

def TF_EnqueueTPUEmbeddingBatchOp : TF_Op<"EnqueueTPUEmbeddingBatch", [TF_TPUEmbeddingSideEffect]> {
  let summary = [{
An op that enqueues a list of input batch tensors to TPUEmbedding.
  }];

  let arguments = (ins
    Arg<Variadic<TF_StrTensor>, [{A list of 1D tensors, one for each embedding table, containing the
batch inputs encoded as dist_belief.SparseFeatures protos. If the weight
field in the SparseFeatures proto is not populated for an ID, a weight of
1.0 is assumed.}]>:$batch,
    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,

    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_EnqueueTPUEmbeddingIntegerBatchOp : TF_Op<"EnqueueTPUEmbeddingIntegerBatch", [TF_TPUEmbeddingSideEffect]> {
  let summary = [{
An op that enqueues a list of input batch tensors to TPUEmbedding.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Int32Tensor>, [{A list of 1D tensors, one for each embedding table, containing the
indices into the tables.}]>:$batch,
    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,

    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_EnqueueTPUEmbeddingRaggedTensorBatchOp : TF_Op<"EnqueueTPUEmbeddingRaggedTensorBatch", [SameVariadicOperandSize, TF_TPUEmbeddingSideEffect]> {
  let summary = "Eases the porting of code that uses tf.nn.embedding_lookup().";

  let description = [{
sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond
to the ith feature. table_ids[i] indicates which embedding table to look up
the ith feature in.

The tensors at corresponding positions in two of the input lists,
embedding_indices and aggregation_weights, must have the same shape, i.e. rank 1
with dim_size() equal to the total number of lookups into the table described by
the corresponding feature.
  }];

  let arguments = (ins
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the break points for splitting
embedding_indices and aggregation_weights into rows.
It corresponds to ids.row_splits in embedding_lookup(), when ids is a
RaggedTensor.}]>:$sample_splits,
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.
It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor.}]>:$embedding_indices,
    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per training example
aggregation weights. It corresponds to the values field of a RaggedTensor
with the same row_splits as ids in embedding_lookup(), when ids is a
RaggedTensor.}]>:$aggregation_weights,
    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,

    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners,
    I64ArrayAttr:$table_ids,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$max_sequence_lengths,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$num_features
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandTypeAttr T1 = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr T2 = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T3 = TF_DerivedOperandTypeAttr<2>;
}

def TF_EnqueueTPUEmbeddingSparseBatchOp : TF_Op<"EnqueueTPUEmbeddingSparseBatch", [SameVariadicOperandSize, TF_TPUEmbeddingSideEffect]> {
  let summary = [{
An op that enqueues TPUEmbedding input indices from a SparseTensor.
  }];

  let description = [{
This Op eases the porting of code that uses embedding_lookup_sparse(),
although some Python preprocessing of the SparseTensor arguments to
embedding_lookup_sparse() is required to produce the arguments to this Op,
since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training
step.

The tensors at corresponding positions in the three input lists
must have the same shape, i.e. rank 1 with dim_size() equal to the total
number of lookups into the table described by the corresponding table_id.
  }];

  let arguments = (ins
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the training example and
feature to which the corresponding embedding_indices and aggregation_weights
values belong. sample_indices[i] must equal b * nf + f, where nf is the
number of features from the corresponding table, f is in [0, nf), and
b is in [0, batch size).}]>:$sample_indices,
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.}]>:$embedding_indices,
    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per sample -- i.e. per
(training example, feature) -- aggregation weights.}]>:$aggregation_weights,
    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,

    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandTypeAttr T1 = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr T2 = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T3 = TF_DerivedOperandTypeAttr<2>;
}

def TF_EnqueueTPUEmbeddingSparseTensorBatchOp : TF_Op<"EnqueueTPUEmbeddingSparseTensorBatch", [SameVariadicOperandSize, TF_TPUEmbeddingSideEffect]> {
  let summary = [{
Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
  }];

  let description = [{
sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond
to the ith feature. table_ids[i] indicates which embedding table to look up
the ith feature in.

The tensors at corresponding positions in the three input lists (sample_indices,
embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1
with dim_size() equal to the total number of lookups into the table described by
the corresponding feature.
  }];

  let arguments = (ins
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the training example to
which the corresponding embedding_indices and aggregation_weights values
belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse().}]>:$sample_indices,
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.
It corresponds to sp_ids.values in embedding_lookup_sparse().}]>:$embedding_indices,
    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per training example
aggregation weights. It corresponds to sp_weights.values in
embedding_lookup_sparse().}]>:$aggregation_weights,
    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,

    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners,
    I64ArrayAttr:$table_ids,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$max_sequence_lengths,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$num_features
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandTypeAttr T1 = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr T2 = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T3 = TF_DerivedOperandTypeAttr<2>;
}

def TF_EnsureShapeOp : TF_Op<"EnsureShape", [NoSideEffect]> {
  let summary = "Ensures that the tensor's shape matches the expected shape.";

  let description = [{
Raises an error if the input tensor's shape does not match the specified shape.
Returns the input tensor otherwise.
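
A small sketch via the `tf.ensure_shape` wrapper:

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = tf.ensure_shape(x, (2, 2))  # passes through unchanged
# tf.ensure_shape(x, (3, 3)) would raise an error.
```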
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A tensor whose shape is to be validated.}]>:$input,

    TF_ShapeAttr:$shape
  );

  let results = (outs
    Res<TF_Tensor, [{A tensor with the same shape and contents as the input tensor or value.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasFolder = 1;
}

def TF_EqualOp : TF_Op<"Equal", [Commutative, NoSideEffect]> {
  let summary = "Returns the truth value of (x == y) element-wise.";

  let description = [{
*NOTE*: `Equal` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

```python
x = tf.constant([2, 4])
y = tf.constant(2)
tf.math.equal(x, y) ==> array([True, False])

x = tf.constant([2, 4])
y = tf.constant([2, 4])
tf.math.equal(x, y) ==> array([True,  True])
```
  }];

  let arguments = (ins
    TF_Tensor:$x,
    TF_Tensor:$y,

    DefaultValuedAttr<BoolAttr, "true">:$incompatible_shape_error
  );

  let results = (outs
    TF_BoolTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let builders = [
    OpBuilder<(ins "Value":$x, "Value":$y,
      "BoolAttr":$incompatible_shape_error)>
  ];

  let verifier = [{
    return Verify(*this);
  }];

  let hasCanonicalizer = 1;
}

def TF_ErfOp : TF_Op<"Erf", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` element-wise.
  }];

  let description = [{
In statistics, for non-negative values of $x$, the error function has the
following interpretation: for a random variable $Y$ that is normally
distributed with mean 0 and variance $1/\sqrt{2}$, $erf(x)$ is the probability
that $Y$ falls in the range $[-x, x]$.
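
A small sketch via the `tf.math.erf` wrapper (values rounded):

```python
import tensorflow as tf

tf.math.erf(tf.constant([0.0, 1.0]))  # ==> approximately [0.0, 0.8427]
```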
  }];

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ErfcOp : TF_Op<"Erfc", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Computes the complementary error function of `x` element-wise.
  }];

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ErfinvOp : TF_Op<"Erfinv", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ExpOp : TF_Op<"Exp", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = [{
Computes exponential of x element-wise.  \\(y = e^x\\).
  }];

  let description = [{
This function computes the exponential of every element in the input tensor.
  i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor.
  `e` denotes Euler's number and is approximately equal to 2.718281.
  Output is positive for any real input.

  ```python
  x = tf.constant(2.0)
  tf.math.exp(x) ==> 7.389056

  x = tf.constant([2.0, 8.0])
  tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)
  ```

  For complex numbers, the exponential value is calculated as follows:

  ```
  e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)
  ```

  Consider the complex number 1+1j as an example:
  e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j)

  ```python
  x = tf.constant(1 + 1j)
  tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ExpandDimsOp : TF_Op<"ExpandDims", [NoSideEffect]> {
  let summary = "Inserts a dimension of 1 into a tensor's shape.";

  let description = [{
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
zero; if you specify a negative number for `axis` it is counted backward from
the end.

This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.

Other examples:

```
# 't' is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]

# 't2' is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
```

This operation requires that:

`-1-input.dims() <= dim <= input.dims()`

This operation is related to `squeeze()`, which removes dimensions of
size 1.
  }];

  let arguments = (ins
    TF_Tensor:$input,
    Arg<TF_I32OrI64Tensor, [{0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`. Must be in the range
`[-rank(input) - 1, rank(input)]`.}]>:$dim
  );

  let results = (outs
    Res<TF_Tensor, [{Contains the same data as `input`, but its shape has an additional
dimension of size 1 added.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tdim = TF_DerivedOperandTypeAttr<1>;

  let builders = [
    OpBuilder<(ins "Value":$condition, "Value":$dim)>
  ];
}
4453
4454def TF_Expm1Op : TF_Op<"Expm1", [DeclareOpInterfaceMethods<InferTypeOpInterface>, NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
4455  let summary = "Computes `exp(x) - 1` element-wise.";
4456
4457  let description = [{
4458i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor.
4459  `e` denotes Euler's number and is approximately equal to 2.718281.
4460
4461  ```python
4462  x = tf.constant(2.0)
4463  tf.math.expm1(x) ==> 6.389056
4464
4465  x = tf.constant([2.0, 8.0])
4466  tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)
4467
4468  x = tf.constant(1 + 1j)
4469  tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)
4470  ```
4471  }];
4472
4473  let arguments = (ins
4474    TF_FpOrComplexTensor:$x
4475  );
4476
4477  let results = (outs
4478    TF_FpOrComplexTensor:$y
4479  );
4480
4481  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4482
4483  let extraClassDeclaration = [{
4484    // InferTypeOpInterface:
4485    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
4486      return ArraysAreCastCompatible(l, r);
4487    }
  }];
}
4491
4492def TF_ExtractImagePatchesOp : TF_Op<"ExtractImagePatches", [NoSideEffect]> {
4493  let summary = [{
4494Extract `patches` from `images` and put them in the "depth" output dimension.
4495  }];
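
  let description = [{
The public `tf.image.extract_patches` wrapper exposes this op in TF2; a
minimal sketch (the wrapper names the `ksizes` attribute `sizes`):

```python
import tensorflow as tf

images = tf.reshape(tf.range(100.), [1, 10, 10, 1])
patches = tf.image.extract_patches(
    images, sizes=[1, 3, 3, 1], strides=[1, 5, 5, 1],
    rates=[1, 1, 1, 1], padding='VALID')
print(patches.shape)  # (1, 2, 2, 9): each 3x3 patch flattened into depth
```
  }];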
4496
4497  let arguments = (ins
4498    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.}]>:$images,
4499
4500    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksizes,
4501    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
4502    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$rates,
4503    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding
4504  );
4505
4506  let results = (outs
4507    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
4508ksize_cols * depth]` containing image patches with size
4509`ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
4510`out_rows` and `out_cols` are the dimensions of the output patches.}]>:$patches
4511  );
4512
4513  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4514}
4515
4516def TF_FFTOp : TF_Op<"FFT", [NoSideEffect]> {
4517  let summary = "Fast Fourier transform.";
4518
4519  let description = [{
4520Computes the 1-dimensional discrete Fourier transform over the inner-most
4521dimension of `input`.
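
A minimal sketch using the public `tf.signal.fft` wrapper:

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0, 4.0])
X = tf.signal.fft(tf.cast(x, tf.complex64))  # DFT over the last axis
print(X.numpy())  # [10.+0.j -2.+2.j -2.+0.j -2.-2.j]
```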
4522  }];
4523
4524  let arguments = (ins
4525    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
4526  );
4527
4528  let results = (outs
4529    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most
4530  dimension of `input` is replaced with its 1D Fourier transform.
4531
4532@compatibility(numpy)
4533Equivalent to np.fft.fft
4534@end_compatibility}]>:$output
4535  );
4536
4537  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
4538}
4539
4540def TF_FFT2DOp : TF_Op<"FFT2D", [NoSideEffect]> {
4541  let summary = "2D fast Fourier transform.";
4542
4543  let description = [{
4544Computes the 2-dimensional discrete Fourier transform over the inner-most
45452 dimensions of `input`.
4546  }];
4547
4548  let arguments = (ins
4549    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
4550  );
4551
4552  let results = (outs
4553    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 2
4554  dimensions of `input` are replaced with their 2D Fourier transform.
4555
4556@compatibility(numpy)
4557Equivalent to np.fft.fft2
4558@end_compatibility}]>:$output
4559  );
4560
4561  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
4562}
4563
4564def TF_FFT3DOp : TF_Op<"FFT3D", [NoSideEffect]> {
4565  let summary = "3D fast Fourier transform.";
4566
4567  let description = [{
4568Computes the 3-dimensional discrete Fourier transform over the inner-most 3
4569dimensions of `input`.
4570  }];
4571
4572  let arguments = (ins
4573    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
4574  );
4575
4576  let results = (outs
4577    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 3
4578  dimensions of `input` are replaced with their 3D Fourier transform.
4579
4580@compatibility(numpy)
4581Equivalent to np.fft.fftn with 3 dimensions.
4582@end_compatibility}]>:$output
4583  );
4584
4585  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
4586}
4587
4588def TF_FakeParamOp : TF_Op<"FakeParam", [NoSideEffect, TF_NoConstantFold]> {
4589  let summary = [{
4590  This op is used as a placeholder in If branch functions. It doesn't provide a
  valid output when run, so it must either be removed (e.g. replaced with a
  function input) or guaranteed not to be used (e.g. when mirroring an
4593  intermediate output needed for the gradient computation of the other branch).
4594  }];
4595
4596  let arguments = (ins
4597    TF_ShapeAttr:$shape
4598  );
4599
4600  let results = (outs
    Res<TF_Tensor, [{"Fake" output value. This should not be consumed by another op.}]>:$output
4602  );
4603
4604  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
4605}
4606
4607def TF_FakeQuantWithMinMaxArgsOp : TF_Op<"FakeQuantWithMinMaxArgs", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
4608  let summary = [{
4609Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type.
4610  }];
4611
4612  let description = [{
4613Attributes
4614
4615*   `[min; max]` define the clamping range for the `inputs` data.
4616*   `inputs` values are quantized into the quantization range (
4617`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
4618when it is true) and then de-quantized and output as floats in `[min; max]`
4619interval.
4620*   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
4621
4622Before quantization, `min` and `max` values are adjusted with the following
4623logic.
4624It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
4625the behavior can be unexpected:
4626
4627*   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
4628*   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
*   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`,
4630`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
4631
4632Quantization is called fake since the output is still in floating point.
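
A minimal sketch using the public
`tf.quantization.fake_quant_with_min_max_args` wrapper:

```python
import tensorflow as tf

x = tf.constant([-6.5, -0.1, 0.0, 3.3, 7.0])
y = tf.quantization.fake_quant_with_min_max_args(
    x, min=-6.0, max=6.0, num_bits=8)
# Values are clamped to the adjusted range and snapped to one of the
# 2^8 = 256 representable levels, but remain float32.
print(y.dtype)  # <dtype: 'float32'>
```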
4633  }];
4634
4635  let arguments = (ins
4636    TF_Float32Tensor:$inputs,
4637
4638    DefaultValuedAttr<F32Attr, "-6.0f">:$min,
4639    DefaultValuedAttr<F32Attr, "6.0f">:$max,
4640    DefaultValuedAttr<I64Attr, "8">:$num_bits,
4641    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
4642  );
4643
4644  let results = (outs
4645    TF_Float32Tensor:$outputs
4646  );
4647
4648  let verifier = [{
4649    return Verify(*this);
4650  }];
4651}
4652
4653def TF_FakeQuantWithMinMaxArgsGradientOp : TF_Op<"FakeQuantWithMinMaxArgsGradient", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
4654  let summary = "Compute gradients for a FakeQuantWithMinMaxArgs operation.";
4655
4656  let arguments = (ins
4657    Arg<TF_Float32Tensor, [{Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.}]>:$gradients,
4658    Arg<TF_Float32Tensor, [{Values passed as inputs to the FakeQuantWithMinMaxArgs operation.}]>:$inputs,
4659
4660    DefaultValuedAttr<F32Attr, "-6.0f">:$min,
4661    DefaultValuedAttr<F32Attr, "6.0f">:$max,
4662    DefaultValuedAttr<I64Attr, "8">:$num_bits,
4663    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
4664  );
4665
4666  let results = (outs
4667    Res<TF_Float32Tensor, [{Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
4668`gradients * (inputs >= min && inputs <= max)`.}]>:$backprops
4669  );
4670}
4671
4672def TF_FakeQuantWithMinMaxVarsOp : TF_Op<"FakeQuantWithMinMaxVars", [NoSideEffect]> {
4673  let summary = [{
4674Fake-quantize the 'inputs' tensor of type float via global float scalars
4675  }];
4676
4677  let description = [{
Fake-quantize the `inputs` tensor of type float via the global float scalars
`min` and `max` to an `outputs` tensor of the same shape as `inputs`.
4680
4681Attributes
4682
4683*   `[min; max]` define the clamping range for the `inputs` data.
4684*   `inputs` values are quantized into the quantization range (
4685`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
4686when it is true) and then de-quantized and output as floats in `[min; max]`
4687interval.
4688*   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
4689
4690Before quantization, `min` and `max` values are adjusted with the following
4691logic.
4692It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
4693the behavior can be unexpected:
4694
4695*   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
4696*   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
*   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`,
4698`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
4699
4700This operation has a gradient and thus allows for training `min` and `max`
4701values.
4702  }];
4703
4704  let arguments = (ins
4705    TF_Float32Tensor:$inputs,
4706    TF_Float32Tensor:$min,
4707    TF_Float32Tensor:$max,
4708
4709    DefaultValuedAttr<I64Attr, "8">:$num_bits,
4710    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
4711  );
4712
4713  let results = (outs
4714    TF_Float32Tensor:$outputs
4715  );
4716
4717  let verifier = [{
4718    return Verify(*this);
4719  }];
4720}
4721
4722def TF_FakeQuantWithMinMaxVarsGradientOp : TF_Op<"FakeQuantWithMinMaxVarsGradient", [NoSideEffect]> {
4723  let summary = "Compute gradients for a FakeQuantWithMinMaxVars operation.";
4724
4725  let arguments = (ins
4726    Arg<TF_Float32Tensor, [{Backpropagated gradients above the FakeQuantWithMinMaxVars operation.}]>:$gradients,
    Arg<TF_Float32Tensor, [{Values passed as inputs to the FakeQuantWithMinMaxVars operation.}]>:$inputs,
    Arg<TF_Float32Tensor, [{Lower bound of the quantization interval, a scalar float.}]>:$min,
    Arg<TF_Float32Tensor, [{Upper bound of the quantization interval, a scalar float.}]>:$max,
4731
4732    DefaultValuedAttr<I64Attr, "8">:$num_bits,
4733    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
4734  );
4735
4736  let results = (outs
4737    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. inputs:
4738`gradients * (inputs >= min && inputs <= max)`.}]>:$backprops_wrt_input,
4739    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. min parameter:
4740`sum(gradients * (inputs < min))`.}]>:$backprop_wrt_min,
4741    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. max parameter:
4742`sum(gradients * (inputs > max))`.}]>:$backprop_wrt_max
4743  );
4744}
4745
4746def TF_FakeQuantWithMinMaxVarsPerChannelOp : TF_Op<"FakeQuantWithMinMaxVarsPerChannel", [NoSideEffect]> {
4747  let summary = [{
4748Fake-quantize the 'inputs' tensor of type float via per-channel floats
4749  }];
4750
4751  let description = [{
Fake-quantize the `inputs` tensor of type float, with one of the shapes `[d]`,
`[b, d]`, or `[b, h, w, d]`, via per-channel floats `min` and `max` of shape
`[d]` to an `outputs` tensor of the same shape as `inputs`.
4755
4756Attributes
4757
4758*   `[min; max]` define the clamping range for the `inputs` data.
4759*   `inputs` values are quantized into the quantization range (
4760`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
4761when it is true) and then de-quantized and output as floats in `[min; max]`
4762interval.
4763*   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
4764
4765Before quantization, `min` and `max` values are adjusted with the following
4766logic.
4767It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
4768the behavior can be unexpected:
4769
4770*   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
4771*   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
*   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`,
4773`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
4774
4775This operation has a gradient and thus allows for training `min` and `max`
4776values.
4777  }];
4778
4779  let arguments = (ins
4780    TF_Float32Tensor:$inputs,
4781    TF_Float32Tensor:$min,
4782    TF_Float32Tensor:$max,
4783
4784    DefaultValuedAttr<I64Attr, "8">:$num_bits,
4785    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
4786  );
4787
4788  let results = (outs
4789    TF_Float32Tensor:$outputs
4790  );
4791
4792  let verifier = [{
4793    return Verify(*this);
4794  }];
4795}
4796
4797def TF_FillOp : TF_Op<"Fill", [NoSideEffect]> {
4798  let summary = "Creates a tensor filled with a scalar value.";
4799
4800  let description = [{
4801This operation creates a tensor of shape `dims` and fills it with `value`.
4802
4803For example:
4804
4805```
4806# Output tensor has shape [2, 3].
4807fill([2, 3], 9) ==> [[9, 9, 9]
4808                     [9, 9, 9]]
4809```
4810
4811`tf.fill` differs from `tf.constant` in a few ways:
4812
4813*   `tf.fill` only supports scalar contents, whereas `tf.constant` supports
4814    Tensor values.
4815*   `tf.fill` creates an Op in the computation graph that constructs the actual
4816    Tensor value at runtime. This is in contrast to `tf.constant` which embeds
4817    the entire Tensor into the graph with a `Const` node.
4818*   Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
4819    based on other runtime Tensors, unlike `tf.constant`.
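
A minimal sketch of the dynamic-shape point above, using the public `tf.fill`:

```python
import tensorflow as tf

@tf.function
def zeros_like_dynamic(x):
  # `tf.shape(x)` is a runtime tensor, so the fill shape can depend on it.
  return tf.fill(tf.shape(x), 0.0)

print(zeros_like_dynamic(tf.ones([2, 3])).shape)  # (2, 3)
```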
4820  }];
4821
4822  let arguments = (ins
4823    Arg<TF_I32OrI64Tensor, [{1-D. Represents the shape of the output tensor.}]>:$dims,
4824    Arg<TF_Tensor, [{0-D (scalar). Value to fill the returned tensor.
4825
4826@compatibility(numpy)
4827Equivalent to np.full
4828@end_compatibility}]>:$value
4829  );
4830
4831  let results = (outs
4832    TF_Tensor:$output
4833  );
4834
4835  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
4836  TF_DerivedOperandTypeAttr index_type = TF_DerivedOperandTypeAttr<0>;
4837
4838  let verifier = [{
4839    return Verify(*this);
4840  }];
4841
4842  let hasFolder = 1;
4843
4844  let builders = [
4845    OpBuilder<(ins "Value":$dims, "Value":$value)>
4846  ];
4847}
4848
4849def TF_FinalizeDatasetOp : TF_Op<"FinalizeDataset", [NoSideEffect]> {
4850  let summary = [{
4851Creates a dataset by applying `tf.data.Options` to `input_dataset`.
4852  }];
4853
4854  let arguments = (ins
4855    Arg<TF_VariantTensor, [{A variant tensor representing the input dataset.}]>:$input_dataset,
4856
4857    DefaultValuedAttr<BoolAttr, "false">:$has_captured_ref,
4858    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
4859    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
4860  );
4861
4862  let results = (outs
4863    TF_VariantTensor:$handle
4864  );
4865}
4866
4867def TF_FlatMapDatasetOp : TF_Op<"FlatMapDataset", [NoSideEffect]> {
4868  let summary = [{
4869Creates a dataset that applies `f` to the outputs of `input_dataset`.
4870  }];
4871
4872  let description = [{
4873Unlike MapDataset, the `f` in FlatMapDataset is expected to return a
4874Dataset variant, and FlatMapDataset will flatten successive results
4875into a single Dataset.
4876  }];
4877
4878  let arguments = (ins
4879    TF_VariantTensor:$input_dataset,
4880    Variadic<TF_Tensor>:$other_arguments,
4881
4882    SymbolRefAttr:$f,
4883    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
4884    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
4885  );
4886
4887  let results = (outs
4888    TF_VariantTensor:$handle
4889  );
4890
4891  TF_DerivedOperandTypeListAttr Targuments = TF_DerivedOperandTypeListAttr<1>;
4892}
4893
4894def TF_FloorOp : TF_Op<"Floor", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
4895  let summary = "Returns element-wise largest integer not greater than x.";
4896
4897  let arguments = (ins
4898    TF_FloatTensor:$x
4899  );
4900
4901  let results = (outs
4902    TF_FloatTensor:$y
4903  );
4904
4905  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4906}
4907
4908def TF_FloorDivOp : TF_Op<"FloorDiv", [NoSideEffect, ResultsBroadcastableShape]>,
4909                    WithBroadcastableBinOpBuilder {
4910  let summary = "Returns x // y element-wise.";
4911
4912  let description = [{
4913*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
4914[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
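
A minimal sketch of the flooring behavior on negative operands, via the public
`tf.math.floordiv` wrapper:

```python
import tensorflow as tf

print(tf.math.floordiv(7, 2).numpy())   # 3
print(tf.math.floordiv(-7, 2).numpy())  # -4, rounds toward negative infinity
```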
4915  }];
4916
4917  let arguments = (ins
4918    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
4919    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
4920  );
4921
4922  let results = (outs
4923    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
4924  );
4925
4926  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4927}
4928
4929def TF_FloorModOp : TF_Op<"FloorMod", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
4930                    WithBroadcastableBinOpBuilder {
4931  let summary = [{
4932Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
4933  }];
4934
4935  let description = [{
4936true, this follows Python semantics in that the result here is consistent
4937with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
4938
4939*NOTE*: `FloorMod` supports broadcasting. More about broadcasting
4940[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
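
A minimal sketch of the flooring identity, via the public `tf.math.floormod`
wrapper:

```python
import tensorflow as tf

x, y = tf.constant(-7), tf.constant(2)
print(tf.math.floormod(x, y).numpy())  # 1, the result takes the sign of y
print((tf.math.floordiv(x, y) * y + tf.math.floormod(x, y)).numpy())  # -7 == x
```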
4941  }];
4942
4943  let arguments = (ins
4944    TF_IntOrFpTensor:$x,
4945    TF_IntOrFpTensor:$y
4946  );
4947
4948  let results = (outs
4949    TF_IntOrFpTensor:$z
4950  );
4951
4952  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4953}
4954
4955def TF_FusedBatchNormOp : TF_Op<"FusedBatchNorm", [NoSideEffect]> {
4956  let summary = "Batch normalization.";
4957
4958  let description = [{
Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
The size of the 1D Tensors matches dimension C of the 4D Tensors.
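
A minimal training-mode sketch via the TF1-compat wrapper
`tf.compat.v1.nn.fused_batch_norm` (assumed to lower to this op family):

```python
import tensorflow as tf

x = tf.random.normal([8, 4, 4, 3])  # NHWC
scale, offset = tf.ones([3]), tf.zeros([3])
y, batch_mean, batch_var = tf.compat.v1.nn.fused_batch_norm(
    x, scale, offset, is_training=True)
print(y.shape, batch_mean.shape)  # (8, 4, 4, 3) (3,)
```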
4961  }];
4962
4963  let arguments = (ins
4964    Arg<TF_Float32Tensor, [{A 4D Tensor for input data.}]>:$x,
4965    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4966    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
4967    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
4968must be empty for training.}]>:$mean,
4969    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
4970must be empty for training.}]>:$variance,
4971
4972    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4973    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
4974    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
4975    DefaultValuedAttr<BoolAttr, "true">:$is_training
4976  );
4977
4978  let results = (outs
4979    Res<TF_Float32Tensor, [{A 4D Tensor for output data.}]>:$y,
4980    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
4981to compute the running mean.}]>:$batch_mean,
4982    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
4983TensorFlow to compute the running variance.}]>:$batch_variance,
4984    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
4985in the gradient computation.}]>:$reserve_space_1,
4986    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
4987in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2
4988  );
4989
4990  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4991
4992  let hasCanonicalizer = 1;
4993
4994  let verifier = [{
4995    return Verify(*this);
4996  }];
4997}
4998
4999def TF_FusedBatchNormGradOp : TF_Op<"FusedBatchNormGrad", [NoSideEffect]> {
5000  let summary = "Gradient for batch normalization.";
5001
5002  let description = [{
Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
The size of the 1D Tensors matches dimension C of the 4D Tensors.
5005  }];
5006
5007  let arguments = (ins
5008    Arg<TF_Float32Tensor, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
5009    Arg<TF_Float32Tensor, [{A 4D Tensor for input data.}]>:$x,
5010    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
5011    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
5012mean to be reused in gradient computation. When is_training is
5013False, a 1D Tensor for the population mean to be reused in both
50141st and 2nd order gradient computation.}]>:$reserve_space_1,
5015    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
5016variance (inverted variance in the cuDNN case) to be reused in
5017gradient computation. When is_training is False, a 1D Tensor
5018for the population variance to be reused in both 1st and 2nd
5019order gradient computation.}]>:$reserve_space_2,
5020
5021    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
5022    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
5023    DefaultValuedAttr<BoolAttr, "true">:$is_training
5024  );
5025
5026  let results = (outs
5027    Res<TF_Float32Tensor, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
5028    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
5029    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
5030    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_3,
5031    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
5032in FusedBatchNorm.}]>:$reserve_space_4
5033  );
5034
5035  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5036}
5037
5038def TF_FusedBatchNormGradV2Op : TF_Op<"FusedBatchNormGradV2", [NoSideEffect]> {
5039  let summary = "Gradient for batch normalization.";
5040
5041  let description = [{
Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
The size of the 1D Tensors matches dimension C of the 4D Tensors.
5044  }];
5045
5046  let arguments = (ins
5047    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
5048    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
5049    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
5050    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
5051mean to be reused in gradient computation. When is_training is
5052False, a 1D Tensor for the population mean to be reused in both
50531st and 2nd order gradient computation.}]>:$reserve_space_1,
5054    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
5055variance (inverted variance in the cuDNN case) to be reused in
5056gradient computation. When is_training is False, a 1D Tensor
5057for the population variance to be reused in both 1st and 2nd
5058order gradient computation.}]>:$reserve_space_2,
5059
5060    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
5061    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
5062    DefaultValuedAttr<BoolAttr, "true">:$is_training
5063  );
5064
5065  let results = (outs
5066    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
5067    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
5068    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
5069    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_3,
5070    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
5071in FusedBatchNorm.}]>:$reserve_space_4
5072  );
5073
5074  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5075  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<3>;
5076}
5077
5078def TF_FusedBatchNormGradV3Op : TF_Op<"FusedBatchNormGradV3", [NoSideEffect, TF_LayoutSensitiveInterface]> {
5079  let summary = "Gradient for batch normalization.";
5080
5081  let description = [{
Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
The size of the 1D Tensors matches dimension C of the 4D Tensors.
5084  }];
5085
5086  let arguments = (ins
5087    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
5088    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
5089    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
5090    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
5091mean to be reused in gradient computation. When is_training is
5092False, a 1D Tensor for the population mean to be reused in both
50931st and 2nd order gradient computation.}]>:$reserve_space_1,
5094    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
5095variance (inverted variance in the cuDNN case) to be reused in
5096gradient computation. When is_training is False, a 1D Tensor
5097for the population variance to be reused in both 1st and 2nd
5098order gradient computation.}]>:$reserve_space_2,
5099    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for some intermediate results to be reused
5100in gradient computation. When is_training is False, a dummy empty Tensor will be
5101created.}]>:$reserve_space_3,
5102
5103    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
5104    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NDHWC", "NCDHW"]>, "NHWC">:$data_format,
5105    DefaultValuedAttr<BoolAttr, "true">:$is_training
5106  );
5107
5108  let results = (outs
5109    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
5110    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
5111    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
5112    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_4,
5113    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
5114in FusedBatchNorm.}]>:$reserve_space_5
5115  );
5116
5117  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5118  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<3>;
5119
5120  let extraClassDeclaration = [{
5121    // TF_LayoutSensitiveInterface:
5122    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0, 1}; }
5123    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
5124    StringRef GetOptimalLayout(const RuntimeDevices& devices);
5125    LogicalResult UpdateDataFormat(StringRef data_format);
5126  }];
5127}
5128
5129def TF_FusedBatchNormV2Op : TF_Op<"FusedBatchNormV2", [NoSideEffect, TF_FoldOperandsTransposeInterface, TF_LayoutSensitiveInterface]> {
5130  let summary = "Batch normalization.";
5131
5132  let description = [{
Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
The size of the 1D Tensors matches dimension C of the 4D Tensors.
5135  }];
5136
5137  let arguments = (ins
5138    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
5139    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
5140    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
5141    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
5142must be empty for training.}]>:$mean,
5143    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
5144must be empty for training.}]>:$variance,
5145
5146    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
5147    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
5148    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
5149    DefaultValuedAttr<BoolAttr, "true">:$is_training
5150  );
5151
5152  let results = (outs
5153    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for output data.}]>:$y,
5154    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
5155to compute the running mean.}]>:$batch_mean,
5156    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
5157TensorFlow to compute the running variance.}]>:$batch_variance,
5158    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
5159in the gradient computation.}]>:$reserve_space_1,
5160    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
5161in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2
5162  );
5163
5164  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5165  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<1>;
5166
5167  let extraClassDeclaration = [{
5168    // TF_FoldOperandsTransposeInterface:
5169    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
5170    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
5171    LogicalResult FoldOperandsPermutation(ArrayRef<int64_t> permutation);
5172
5173    // TF_LayoutSensitiveInterface:
5174    StringRef GetOptimalLayout(const RuntimeDevices& devices);
5175    LogicalResult UpdateDataFormat(StringRef data_format);
5176  }];
5177}
5178
5179def TF_FusedBatchNormV3Op : TF_Op<"FusedBatchNormV3", [NoSideEffect, TF_FoldOperandsTransposeInterface, TF_LayoutSensitiveInterface]> {
5180  let summary = "Batch normalization.";
5181
5182  let description = [{
Note that the sizes of the 4D Tensors are defined by either "NHWC" or "NCHW".
The size of the 1D Tensors matches dimension C of the 4D Tensors.
5185  }];
5186
5187  let arguments = (ins
5188    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
5189    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
5190    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
5191    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
5192must be empty for training.}]>:$mean,
5193    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
5194must be empty for training.}]>:$variance,
5195
5196    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
5197    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
5198    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NDHWC", "NCDHW"]>, "NHWC">:$data_format,
5199    DefaultValuedAttr<BoolAttr, "true">:$is_training
5200  );
5201
5202  let results = (outs
5203    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for output data.}]>:$y,
5204    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
5205to compute the running mean.}]>:$batch_mean,
5206    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
5207TensorFlow to compute the running variance.}]>:$batch_variance,
5208    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
5209in the gradient computation.}]>:$reserve_space_1,
5210    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
5211in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2,
5212    Res<TF_Float32Tensor, [{A 1D Tensor for some intermediate results, to be reused in the gradient
5213computation for better efficiency.}]>:$reserve_space_3
5214  );
5215
5216  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5217  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<1>;
5218
5219  let extraClassDeclaration = [{
5220    // TF_FoldOperandsTransposeInterface:
5221    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
5222    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
5223    LogicalResult FoldOperandsPermutation(ArrayRef<int64_t> permutation);
5224
5225    // TF_LayoutSensitiveInterface:
5226    StringRef GetOptimalLayout(const RuntimeDevices& devices);
5227    LogicalResult UpdateDataFormat(StringRef data_format);
5228  }];
5229}
5230
5231def TF_GatherOp : TF_Op<"Gather", [NoSideEffect]> {
5232  let summary = "Gather slices from `params` according to `indices`.";
5233
5234  let description = [{
5235`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
5236Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
5237
5238```python
5239    # Scalar indices
5240    output[:, ..., :] = params[indices, :, ... :]
5241
5242    # Vector indices
5243    output[i, :, ..., :] = params[indices[i], :, ... :]
5244
5245    # Higher rank indices
5246    output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
5247```
5248
5249If `indices` is a permutation and `len(indices) == params.shape[0]` then
5250this operation will permute `params` accordingly.
5251
5252`validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
5253`indices` are always validated to be within range. If assigned to GPU,
5254out-of-bound indices result in safe but unspecified behavior, which may include
5255raising an error.
5256
5257<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
5258<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
5259</div>
5260  }];
5261
5262  let arguments = (ins
5263    TF_Tensor:$params,
5264    TF_I32OrI64Tensor:$indices,
5265
5266    DefaultValuedAttr<BoolAttr, "true">:$validate_indices
5267  );
5268
5269  let results = (outs
5270    TF_Tensor:$output
5271  );
5272
5273  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
5274  TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
5275}
5276
5277def TF_GatherNdOp : TF_Op<"GatherNd", [NoSideEffect]> {
5278  let summary = [{
5279Gather slices from `params` into a Tensor with shape specified by `indices`.
5280  }];
5281
5282  let description = [{
5283`indices` is a K-dimensional integer tensor, best thought of as a
5284(K-1)-dimensional tensor of indices into `params`, where each element defines a
5285slice of `params`:
5286
5287    output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
5288
5289Whereas in `tf.gather` `indices` defines slices into the `axis`
5290dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
5291first `N` dimensions of `params`, where `N = indices.shape[-1]`.
5292
5293The last dimension of `indices` can be at most the rank of
5294`params`:
5295
5296    indices.shape[-1] <= params.rank
5297
5298The last dimension of `indices` corresponds to elements
5299(if `indices.shape[-1] == params.rank`) or slices
5300(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
5301of `params`.  The output tensor has shape
5302
5303    indices.shape[:-1] + params.shape[indices.shape[-1]:]
5304
5305Note that on CPU, if an out of bound index is found, an error is returned.
5306On GPU, if an out of bound index is found, a 0 is stored in the
5307corresponding output value.
5308
Some examples follow.
5310
5311Simple indexing into a matrix:
5312
5313```python
5314    indices = [[0, 0], [1, 1]]
5315    params = [['a', 'b'], ['c', 'd']]
5316    output = ['a', 'd']
5317```
5318
5319Slice indexing into a matrix:
5320
5321```python
5322    indices = [[1], [0]]
5323    params = [['a', 'b'], ['c', 'd']]
5324    output = [['c', 'd'], ['a', 'b']]
5325```
5326
5327Indexing into a 3-tensor:
5328
5329```python
5330    indices = [[1]]
5331    params = [[['a0', 'b0'], ['c0', 'd0']],
5332              [['a1', 'b1'], ['c1', 'd1']]]
5333    output = [[['a1', 'b1'], ['c1', 'd1']]]
5334
5335
5336    indices = [[0, 1], [1, 0]]
5337    params = [[['a0', 'b0'], ['c0', 'd0']],
5338              [['a1', 'b1'], ['c1', 'd1']]]
5339    output = [['c0', 'd0'], ['a1', 'b1']]
5340
5341
5342    indices = [[0, 0, 1], [1, 0, 1]]
5343    params = [[['a0', 'b0'], ['c0', 'd0']],
5344              [['a1', 'b1'], ['c1', 'd1']]]
5345    output = ['b0', 'b1']
5346```
5347
5348Batched indexing into a matrix:
5349
5350```python
5351    indices = [[[0, 0]], [[0, 1]]]
5352    params = [['a', 'b'], ['c', 'd']]
5353    output = [['a'], ['b']]
5354```
5355
5356Batched slice indexing into a matrix:
5357
5358```python
5359    indices = [[[1]], [[0]]]
5360    params = [['a', 'b'], ['c', 'd']]
5361    output = [[['c', 'd']], [['a', 'b']]]
5362```
5363
5364Batched indexing into a 3-tensor:
5365
5366```python
5367    indices = [[[1]], [[0]]]
5368    params = [[['a0', 'b0'], ['c0', 'd0']],
5369              [['a1', 'b1'], ['c1', 'd1']]]
5370    output = [[[['a1', 'b1'], ['c1', 'd1']]],
5371              [[['a0', 'b0'], ['c0', 'd0']]]]
5372
5373    indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
5374    params = [[['a0', 'b0'], ['c0', 'd0']],
5375              [['a1', 'b1'], ['c1', 'd1']]]
5376    output = [[['c0', 'd0'], ['a1', 'b1']],
5377              [['a0', 'b0'], ['c1', 'd1']]]
5378
5379
5380    indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
5381    params = [[['a0', 'b0'], ['c0', 'd0']],
5382              [['a1', 'b1'], ['c1', 'd1']]]
5383    output = [['b0', 'b1'], ['d0', 'c1']]
5384```
5385
5386See also `tf.gather` and `tf.batch_gather`.
5387  }];
5388
5389  let arguments = (ins
5390    Arg<TF_Tensor, [{The tensor from which to gather values.}]>:$params,
5391    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices
5392  );
5393
5394  let results = (outs
5395    Res<TF_Tensor, [{Values from `params` gathered from indices given by `indices`, with
5396shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.}]>:$output
5397  );
5398
5399  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
5400  TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
5401}
5402
5403def TF_GatherV2Op : TF_Op<"GatherV2", [NoSideEffect]> {
5404  let summary = [{
5405Gather slices from `params` axis `axis` according to `indices`.
5406  }];
5407
5408  let description = [{
5409`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
5410Produces an output tensor with shape `params.shape[:axis] +
5411indices.shape[batch_dims:] + params.shape[axis + 1:]` where:
5412
5413```python
5414    # Scalar indices (output is rank(params) - 1).
5415    output[a_0, ..., a_n, b_0, ..., b_n] =
5416      params[a_0, ..., a_n, indices, b_0, ..., b_n]
5417
5418    # Vector indices (output is rank(params)).
5419    output[a_0, ..., a_n, i, b_0, ..., b_n] =
5420      params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
5421
5422    # Higher rank indices (output is rank(params) + rank(indices) - 1).
5423    output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
5424      params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
5425```
5426
5427<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
5428<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
5429</div>
5430
5431Note that on CPU, if an out of bound index is found, an error is returned.
5432On GPU, if an out of bound index is found, a 0 is stored in the
5433corresponding output value.
5434
5435See also `tf.batch_gather` and `tf.gather_nd`.
5436  }];
5437
5438  let arguments = (ins
5439    Arg<TF_Tensor, [{The tensor from which to gather values. Must be at least rank
5440`axis + 1`.}]>:$params,
5441    Arg<TF_I32OrI64Tensor, [{Index tensor. Must be in range `[0, params.shape[axis])`.}]>:$indices,
5442    Arg<TF_I32OrI64Tensor, [{The axis in `params` to gather `indices` from. Defaults to the first
5443dimension. Supports negative indexes.}]>:$axis,
5444
5445    DefaultValuedAttr<I64Attr, "0">:$batch_dims
5446  );
5447
5448  let results = (outs
5449    Res<TF_Tensor, [{Values from `params` gathered from indices given by `indices`, with
5450shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.}]>:$output
5451  );
5452
5453  TF_DerivedOperandTypeAttr Taxis = TF_DerivedOperandTypeAttr<2>;
5454  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
5455  TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
5456
5457  let verifier = [{
5458    return Verify(*this);
5459  }];
5460}
5461
5462def TF_GeneratorDatasetOp : TF_Op<"GeneratorDataset", [AttrSizedOperandSegments, TF_GeneratorOpSideEffect]> {
5463  let summary = [{
5464Creates a dataset that invokes a function to generate elements.
5465  }];
5466
5467  let arguments = (ins
5468    Variadic<TF_Tensor>:$init_func_other_args,
5469    Variadic<TF_Tensor>:$next_func_other_args,
5470    Variadic<TF_Tensor>:$finalize_func_other_args,
5471
5472    SymbolRefAttr:$init_func,
5473    SymbolRefAttr:$next_func,
5474    SymbolRefAttr:$finalize_func,
5475    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
5476    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
5477  );
5478
5479  let results = (outs
5480    TF_VariantTensor:$handle
5481  );
5482
5483  TF_DerivedOperandTypeListAttr Tfinalize_func_args = TF_DerivedOperandTypeListAttr<2>;
5484  TF_DerivedOperandTypeListAttr Tinit_func_args = TF_DerivedOperandTypeListAttr<0>;
5485  TF_DerivedOperandTypeListAttr Tnext_func_args = TF_DerivedOperandTypeListAttr<1>;
5486}
5487
5488def TF_GreaterOp : TF_Op<"Greater", [NoSideEffect, ResultsBroadcastableShape]>,
5489                   WithBroadcastableCmpOpBuilder {
5490  let summary = "Returns the truth value of (x > y) element-wise.";
5491
5492  let description = [{
5493*NOTE*: `Greater` supports broadcasting. More about broadcasting
5494[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
5495
5496Example:
5497
5498```python
5499x = tf.constant([5, 4, 6])
5500y = tf.constant([5, 2, 5])
5501tf.math.greater(x, y) ==> [False, True, True]
5502
5503x = tf.constant([5, 4, 6])
5504y = tf.constant([5])
5505tf.math.greater(x, y) ==> [False, False, True]
5506```
5507  }];
5508
5509  let arguments = (ins
5510    TF_IntOrFpTensor:$x,
5511    TF_IntOrFpTensor:$y
5512  );
5513
5514  let results = (outs
5515    TF_BoolTensor:$z
5516  );
5517
5518  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5519}
5520
5521def TF_GreaterEqualOp : TF_Op<"GreaterEqual", [NoSideEffect, ResultsBroadcastableShape]>,
5522                        WithBroadcastableCmpOpBuilder {
5523  let summary = "Returns the truth value of (x >= y) element-wise.";
5524
5525  let description = [{
5526*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
5527[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
5528
5529Example:
5530
5531```python
5532x = tf.constant([5, 4, 6, 7])
5533y = tf.constant([5, 2, 5, 10])
5534tf.math.greater_equal(x, y) ==> [True, True, True, False]
5535
5536x = tf.constant([5, 4, 6, 7])
5537y = tf.constant([5])
5538tf.math.greater_equal(x, y) ==> [True, False, True, True]
5539```
5540  }];
5541
5542  let arguments = (ins
5543    TF_IntOrFpTensor:$x,
5544    TF_IntOrFpTensor:$y
5545  );
5546
5547  let results = (outs
5548    TF_BoolTensor:$z
5549  );
5550
5551  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5552}
5553
5554def TF_HSVToRGBOp : TF_Op<"HSVToRGB", [NoSideEffect]> {
5555  let summary = "Convert one or more images from HSV to RGB.";
5556
5557  let description = [{
5558Outputs a tensor of the same shape as the `images` tensor, containing the RGB
value of the pixels. The output is only well defined if the values in `images`
5560are in `[0,1]`.
5561
5562See `rgb_to_hsv` for a description of the HSV encoding.
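
A minimal sketch via the public `tf.image.hsv_to_rgb` wrapper:

```python
import tensorflow as tf

hsv = tf.constant([[[0.0, 1.0, 1.0]]])  # pure red in HSV
rgb = tf.image.hsv_to_rgb(hsv)
print(rgb.numpy())                      # [[[1. 0. 0.]]]
```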
5563  }];
5564
5565  let arguments = (ins
5566    Arg<TF_FloatTensor, [{1-D or higher rank. HSV data to convert. Last dimension must be size 3.}]>:$images
5567  );
5568
5569  let results = (outs
5570    Res<TF_FloatTensor, [{`images` converted to RGB.}]>:$output
5571  );
5572
5573  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5574}
5575
5576def TF_HashTableOp : TF_Op<"HashTable", []> {
5577  let summary = "Creates a non-initialized hash table.";
5578
5579  let description = [{
5580This op creates a hash table, specifying the type of its keys and values.
5581Before using the table you will have to initialize it.  After initialization the
5582table will be immutable.
5583  }];
5584
5585  let arguments = (ins
5586    DefaultValuedAttr<StrAttr, "">:$container,
5587    DefaultValuedAttr<StrAttr, "">:$shared_name,
5588    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
5589    TypeAttr:$key_dtype,
5590    TypeAttr:$value_dtype
5591  );
5592
5593  let results = (outs
5594    Res<TF_StrTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
5595  );
5596
5597  let hasCanonicalizer = 1;
5598}
5599
5600def TF_HashTableV2Op : TF_Op<"HashTableV2", []> {
5601  let summary = "Creates a non-initialized hash table.";
5602
5603  let description = [{
5604This op creates a hash table, specifying the type of its keys and values.
5605Before using the table you will have to initialize it.  After initialization the
5606table will be immutable.
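
A minimal sketch via the public `tf.lookup` API, which is assumed to lower to
this op plus a separate initializer op:

```python
import tensorflow as tf

table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(
        keys=tf.constant(["a", "b"]),
        values=tf.constant([1, 2], dtype=tf.int64)),
    default_value=-1)
print(table.lookup(tf.constant(["b", "x"])).numpy())  # [ 2 -1]
```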
5607  }];
5608
5609  let arguments = (ins
5610    DefaultValuedAttr<StrAttr, "">:$container,
5611    DefaultValuedAttr<StrAttr, "">:$shared_name,
5612    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
5613    TypeAttr:$key_dtype,
5614    TypeAttr:$value_dtype
5615  );
5616
5617  let results = (outs
5618    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
5619  );
5620
5621  let builders = [
5622    OpBuilder<(ins "StringAttr":$container, "StringAttr":$shared_name,
5623      "BoolAttr":$use_node_name_sharing, "TypeAttr":$key_dtype, "TypeAttr":$value_dtype),
5624    [{
5625      build($_builder, $_state,
5626      mlir::RankedTensorType::get({},
5627      $_builder.getType<mlir::TF::ResourceType>()),
5628      container, shared_name, use_node_name_sharing, key_dtype, value_dtype);
5629    }]>];
5630}
5631
5632def TF_IFFTOp : TF_Op<"IFFT", [NoSideEffect]> {
5633  let summary = "Inverse fast Fourier transform.";
5634
5635  let description = [{
5636Computes the inverse 1-dimensional discrete Fourier transform over the
5637inner-most dimension of `input`.
5638  }];
5639
5640  let arguments = (ins
5641    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
5642  );
5643
5644  let results = (outs
5645    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most
5646  dimension of `input` is replaced with its inverse 1D Fourier transform.
5647
5648@compatibility(numpy)
5649Equivalent to np.fft.ifft
5650@end_compatibility}]>:$output
5651  );
5652
5653  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5654}
5655
5656def TF_IFFT2DOp : TF_Op<"IFFT2D", [NoSideEffect]> {
5657  let summary = "Inverse 2D fast Fourier transform.";
5658
5659  let description = [{
5660Computes the inverse 2-dimensional discrete Fourier transform over the
5661inner-most 2 dimensions of `input`.
5662  }];
5663
5664  let arguments = (ins
5665    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
5666  );
5667
5668  let results = (outs
5669    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 2
5670  dimensions of `input` are replaced with their inverse 2D Fourier transform.
5671
5672@compatibility(numpy)
5673Equivalent to np.fft.ifft2
5674@end_compatibility}]>:$output
5675  );
5676
5677  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5678}
5679
5680def TF_IFFT3DOp : TF_Op<"IFFT3D", [NoSideEffect]> {
5681  let summary = "Inverse 3D fast Fourier transform.";
5682
5683  let description = [{
5684Computes the inverse 3-dimensional discrete Fourier transform over the
5685inner-most 3 dimensions of `input`.
5686  }];
5687
5688  let arguments = (ins
5689    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
5690  );
5691
5692  let results = (outs
5693    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 3
5694  dimensions of `input` are replaced with their inverse 3D Fourier transform.
5695
5696@compatibility(numpy)
5697Equivalent to np.fft.ifftn with 3 dimensions.
5698@end_compatibility}]>:$output
5699  );
5700
5701  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5702}
5703
5704def TF_IRFFTOp : TF_Op<"IRFFT", [NoSideEffect]> {
5705  let summary = "Inverse real-valued fast Fourier transform.";
5706
5707  let description = [{
5708Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
5709signal over the inner-most dimension of `input`.
5710
5711The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
5712`fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
5713`fft_length` is not provided, it is computed from the size of the inner-most
5714dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
5715compute `input` is odd, it should be provided since it cannot be inferred
5716properly.
5717
5718Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
5719than the corresponding dimension of `input`, the dimension is cropped. If it is
5720larger, the dimension is padded with zeros.
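
A minimal round-trip sketch via the public `tf.signal` wrappers; note the
explicit `fft_length` needed to recover an odd-length signal:

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0])  # odd length: 5
X = tf.signal.rfft(x)                       # 5 // 2 + 1 = 3 components
y = tf.signal.irfft(X, fft_length=[5])      # default would assume length 4
print(y.numpy())                            # ~[1. 2. 3. 4. 5.]
```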
5721  }];
5722
5723  let arguments = (ins
5724    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
5725    Arg<TF_Int32Tensor, [{An int32 tensor of shape [1]. The FFT length.}]>:$fft_length
5726  );
5727
5728  let results = (outs
5729    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most
5730  dimension of `input` is replaced with the `fft_length` samples of its inverse
5731  1D Fourier transform.
5732
5733@compatibility(numpy)
5734Equivalent to np.fft.irfft
5735@end_compatibility}]>:$output
5736  );
5737
5738  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5739  TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
5740}
5741
5742def TF_IRFFT2DOp : TF_Op<"IRFFT2D", [NoSideEffect]> {
5743  let summary = "Inverse 2D real-valued fast Fourier transform.";
5744
5745  let description = [{
5746Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
5747signal over the inner-most 2 dimensions of `input`.
5748
5749The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
5750The inner-most dimension contains the `fft_length / 2 + 1` unique components of
5751the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
5752from the size of the inner-most 2 dimensions of `input`. If the FFT length used
5753to compute `input` is odd, it should be provided since it cannot be inferred
5754properly.
5755
5756Along each axis `IRFFT2D` is computed on, if `fft_length` (or
5757`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
5758corresponding dimension of `input`, the dimension is cropped. If it is larger,
5759the dimension is padded with zeros.
5760  }];
5761
5762  let arguments = (ins
5763    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
5764    Arg<TF_Int32Tensor, [{An int32 tensor of shape [2]. The FFT length for each dimension.}]>:$fft_length
5765  );
5766
5767  let results = (outs
5768    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most 2
5769  dimensions of `input` are replaced with the `fft_length` samples of their
5770  inverse 2D Fourier transform.
5771
5772@compatibility(numpy)
5773Equivalent to np.fft.irfft2
5774@end_compatibility}]>:$output
5775  );
5776
5777  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5778  TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
5779}
5780
5781def TF_IRFFT3DOp : TF_Op<"IRFFT3D", [NoSideEffect]> {
5782  let summary = "Inverse 3D real-valued fast Fourier transform.";
5783
5784  let description = [{
5785Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
5786signal over the inner-most 3 dimensions of `input`.
5787
5788The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
5789The inner-most dimension contains the `fft_length / 2 + 1` unique components of
5790the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
5791from the size of the inner-most 3 dimensions of `input`. If the FFT length used
5792to compute `input` is odd, it should be provided since it cannot be inferred
5793properly.
5794
5795Along each axis `IRFFT3D` is computed on, if `fft_length` (or
5796`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
5797corresponding dimension of `input`, the dimension is cropped. If it is larger,
5798the dimension is padded with zeros.
5799  }];
5800
5801  let arguments = (ins
5802    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
5803    Arg<TF_Int32Tensor, [{An int32 tensor of shape [3]. The FFT length for each dimension.}]>:$fft_length
5804  );
5805
5806  let results = (outs
5807    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most 3
5808  dimensions of `input` are replaced with the `fft_length` samples of their
5809  inverse 3D real Fourier transform.
5810
5811@compatibility(numpy)
Equivalent to np.fft.irfftn with 3 dimensions.
5813@end_compatibility}]>:$output
5814  );
5815
5816  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5817  TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
5818}
5819
5820def TF_IdentityOp : TF_Op<"Identity", [NoSideEffect, TF_NoConstantFold, TF_OperandsSameAsResultsTypeOrRef]> {
5821  let summary = [{
5822Return a tensor with the same shape and contents as the input tensor or value.
5823  }];
5824
5825  let arguments = (ins
5826    TF_Tensor:$input
5827  );
5828
5829  let results = (outs
5830    TF_Tensor:$output
5831  );
5832
5833  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5834}
5835
5836def TF_IdentityNOp : TF_Op<"IdentityN", [NoSideEffect]> {
5837  let summary = [{
5838Returns a list of tensors with the same shapes and contents as the input
5839  }];
5840
5841  let description = [{
5842tensors.
5843
5844This op can be used to override the gradient for complicated functions. For
5845example, suppose y = f(x) and we wish to apply a custom function g for backprop
5846such that dx = g(dy). In Python,
5847
5848```python
5849with tf.get_default_graph().gradient_override_map(
5850    {'IdentityN': 'OverrideGradientWithG'}):
5851  y, _ = identity_n([f(x), x])
5852
5853@tf.RegisterGradient('OverrideGradientWithG')
5854def ApplyG(op, dy, _):
5855  return [None, g(dy)]  # Do not backprop to f(x).
5856```
5857  }];
5858
5859  let arguments = (ins
5860    Variadic<TF_Tensor>:$input
5861  );
5862
5863  let results = (outs
5864    Variadic<TF_Tensor>:$output
5865  );
5866
5867  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<0>;
5868}
5869
5870def TF_IgammaOp : TF_Op<"Igamma", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
5871                  WithBroadcastableBinOpBuilder {
5872  let summary = [{
5873Compute the lower regularized incomplete Gamma function `P(a, x)`.
5874  }];
5875
5876  let description = [{
The lower regularized incomplete Gamma function is defined as:

\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
5881
5882where
5883
5884\\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\)
5885
5886is the lower incomplete Gamma function.
5887
Note that `Q(a, x)` (`Igammac`) above is the upper regularized incomplete
Gamma function.
5890  }];
5891
5892  let arguments = (ins
5893    TF_FloatTensor:$a,
5894    TF_FloatTensor:$x
5895  );
5896
5897  let results = (outs
5898    TF_FloatTensor:$z
5899  );
5900
5901  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5902}
5903
5904def TF_IgammaGradAOp : TF_Op<"IgammaGradA", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
5905                       WithBroadcastableBinOpBuilder {
5906  let summary = "Computes the gradient of `igamma(a, x)` wrt `a`.";
5907
5908  let arguments = (ins
5909    TF_F32OrF64Tensor:$a,
5910    TF_F32OrF64Tensor:$x
5911  );
5912
5913  let results = (outs
5914    TF_F32OrF64Tensor:$z
5915  );
5916
5917  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5918}
5919
5920def TF_IgammacOp : TF_Op<"Igammac", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
5921                   WithBroadcastableBinOpBuilder {
5922  let summary = [{
5923Compute the upper regularized incomplete Gamma function `Q(a, x)`.
5924  }];
5925
5926  let description = [{
5927The upper regularized incomplete Gamma function is defined as:
5928
5929\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
5930
5931where
5932
\\(Gamma(a, x) = \\int_{x}^{\\infty} t^{a-1} exp(-t) dt\\)
5934
5935is the upper incomplete Gamma function.
5936
Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete
Gamma function.
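
A usage sketch via the public `tf.math.igammac` wrapper (illustrative; not
part of the generated api-def):

```python
import tensorflow as tf

a = tf.constant([1.0, 2.0])
x = tf.constant([1.0, 1.0])
# Q(1, 1) = e^-1 ~= 0.368; Q(2, 1) = 2/e ~= 0.736
tf.math.igammac(a, x)
```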
5939  }];
5940
5941  let arguments = (ins
5942    TF_FloatTensor:$a,
5943    TF_FloatTensor:$x
5944  );
5945
5946  let results = (outs
5947    TF_FloatTensor:$z
5948  );
5949
5950  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5951}
5952
5953def TF_ImagOp : TF_Op<"Imag", [NoSideEffect, SameOperandsAndResultShape]> {
5954  let summary = "Returns the imaginary part of a complex number.";
5955
5956  let description = [{
5957Given a tensor `input` of complex numbers, this operation returns a tensor of
5958type `float` that is the imaginary part of each element in `input`. All
5959elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
5960is the real part and *b* is the imaginary part returned by this operation.
5961
5962For example:
5963
5964```
5965# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
5966tf.imag(input) ==> [4.75, 5.75]
5967```
5968  }];
5969
5970  let arguments = (ins
5971    TensorOf<[TF_Complex128, TF_Complex64]>:$input
5972  );
5973
5974  let results = (outs
5975    TF_F32OrF64Tensor:$output
5976  );
5977
5978  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5979  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
5980}
5981
5982def TF_InTopKV2Op : TF_Op<"InTopKV2", [NoSideEffect]> {
5983  let summary = "Says whether the targets are in the top `K` predictions.";
5984
5985  let description = [{
This outputs a `batch_size` bool array; an entry `out[i]` is `true` if the
5987prediction for the target class is among the top `k` predictions among
5988all predictions for example `i`. Note that the behavior of `InTopK` differs
5989from the `TopK` op in its handling of ties; if multiple classes have the
5990same prediction value and straddle the top-`k` boundary, all of those
5991classes are considered to be in the top `k`.
5992
5993More formally, let
5994
5995  \\(predictions_i\\) be the predictions for all classes for example `i`,
5996  \\(targets_i\\) be the target class for example `i`,
5997  \\(out_i\\) be the output for example `i`,
5998
5999$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
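
A usage sketch via the public `tf.math.in_top_k` wrapper (illustrative; not
part of the generated api-def):

```python
import tensorflow as tf

predictions = tf.constant([[0.1, 0.8, 0.1],
                           [0.3, 0.3, 0.4]])
targets = tf.constant([1, 0])
# Example 0: class 1 has the highest score -> True.
# Example 1: class 2 has the highest score, but the target is 0 -> False.
tf.math.in_top_k(targets, predictions, k=1)  # [True, False]
```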
6000  }];
6001
6002  let arguments = (ins
6003    Arg<TF_Float32Tensor, [{A `batch_size` x `classes` tensor.}]>:$predictions,
6004    Arg<TF_I32OrI64Tensor, [{A `batch_size` vector of class ids.}]>:$targets,
6005    Arg<TF_I32OrI64Tensor, [{Number of top elements to look at for computing precision.}]>:$k
6006  );
6007
6008  let results = (outs
6009    Res<TF_BoolTensor, [{Computed precision at `k` as a `bool Tensor`.}]>:$precision
6010  );
6011
6012  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
6013}
6014
6015def TF_InfeedDequeueOp : TF_Op<"InfeedDequeue", []> {
6016  let summary = [{
6017A placeholder op for a value that will be fed into the computation.
6018  }];
6019
6020  let arguments = (ins
6021    TF_ShapeAttr:$shape
6022  );
6023
6024  let results = (outs
6025    Res<TF_Tensor, [{A tensor that will be provided using the infeed mechanism.}]>:$output
6026  );
6027
6028  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
6029}
6030
6031def TF_InitializeTableOp : TF_Op<"InitializeTable", []> {
6032  let summary = [{
6033Table initializer that takes two tensors for keys and values respectively.
6034  }];
6035
6036  let arguments = (ins
6037    Arg<TF_StrTensor, [{Handle to a table which will be initialized.}], [TF_LookupTableWrite]>:$table_handle,
6038    Arg<TF_Tensor, [{Keys of type Tkey.}]>:$keys,
6039    Arg<TF_Tensor, [{Values of type Tval.}]>:$values
6040  );
6041
6042  let results = (outs);
6043
6044  TF_DerivedOperandTypeAttr Tkey = TF_DerivedOperandTypeAttr<1>;
6045  TF_DerivedOperandTypeAttr Tval = TF_DerivedOperandTypeAttr<2>;
6046}
6047
6048def TF_InitializeTableFromDatasetOp : TF_Op<"InitializeTableFromDataset", []> {
6049  let summary = "";
6050
6051  let arguments = (ins
6052    Arg<TF_ResourceTensor, "", [TF_LookupTableWrite]>:$table_handle,
6053    TF_VariantTensor:$dataset
6054  );
6055
6056  let results = (outs);
6057}
6058
6059def TF_InitializeTableFromTextFileOp : TF_Op<"InitializeTableFromTextFile", []> {
6060  let summary = "Initializes a table from a text file.";
6061
6062  let description = [{
6063It inserts one key-value pair into the table for each line of the file.
The key and value are extracted from the whole line content, from elements of
the line split on `delimiter`, or from the line number (starting from zero).
6066Where to extract the key and value from a line is specified by `key_index` and
6067`value_index`.
6068
- A value of -1 means use the line number (starting from zero); expects `int64`.
- A value of -2 means use the whole line content; expects `string`.
6071- A value >= 0 means use the index (starting at zero) of the split line based
6072  on `delimiter`.
6073  }];
6074
6075  let arguments = (ins
6076    Arg<TF_StrTensor, [{Handle to a table which will be initialized.}], [TF_LookupTableWrite]>:$table_handle,
6077    Arg<TF_StrTensor, [{Filename of a vocabulary text file.}]>:$filename,
6078
6079    Confined<I64Attr, [IntMinValue<-2>]>:$key_index,
6080    Confined<I64Attr, [IntMinValue<-2>]>:$value_index,
6081    Confined<DefaultValuedAttr<I64Attr, "-1">, [IntMinValue<-1>]>:$vocab_size,
6082    DefaultValuedAttr<StrAttr, "\t">:$delimiter,
6083    DefaultValuedAttr<I64Attr, "0">:$offset
6084  );
6085
6086  let results = (outs);
6087}
6088
6089def TF_InitializeTableFromTextFileV2Op : TF_Op<"InitializeTableFromTextFileV2", []> {
6090  let summary = "Initializes a table from a text file.";
6091
6092  let description = [{
6093It inserts one key-value pair into the table for each line of the file.
The key and value are extracted from the whole line content, from elements of
the line split on `delimiter`, or from the line number (starting from zero).
6096Where to extract the key and value from a line is specified by `key_index` and
6097`value_index`.
6098
- A value of -1 means use the line number (starting from zero); expects `int64`.
- A value of -2 means use the whole line content; expects `string`.
6101- A value >= 0 means use the index (starting at zero) of the split line based
6102  on `delimiter`.
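
A sketch of these index conventions through the public `tf.lookup` API
(illustrative; `vocab.txt` is a hypothetical one-token-per-line file):

```python
import tensorflow as tf

# tf.lookup.TextFileIndex.WHOLE_LINE == -2, LINE_NUMBER == -1.
init = tf.lookup.TextFileInitializer(
    "vocab.txt",
    key_dtype=tf.string, key_index=tf.lookup.TextFileIndex.WHOLE_LINE,
    value_dtype=tf.int64, value_index=tf.lookup.TextFileIndex.LINE_NUMBER)
table = tf.lookup.StaticHashTable(init, default_value=-1)
table.lookup(tf.constant(["some_token"]))  # its line number, or -1 if absent
```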
6103  }];
6104
6105  let arguments = (ins
6106    Arg<TF_ResourceTensor, [{Handle to a table which will be initialized.}], [TF_LookupTableWrite]>:$table_handle,
6107    Arg<TF_StrTensor, [{Filename of a vocabulary text file.}]>:$filename,
6108
6109    Confined<I64Attr, [IntMinValue<-2>]>:$key_index,
6110    Confined<I64Attr, [IntMinValue<-2>]>:$value_index,
6111    Confined<DefaultValuedAttr<I64Attr, "-1">, [IntMinValue<-1>]>:$vocab_size,
6112    DefaultValuedAttr<StrAttr, "\t">:$delimiter,
6113    DefaultValuedAttr<I64Attr, "0">:$offset
6114  );
6115
6116  let results = (outs);
6117}
6118
6119def TF_InitializeTableV2Op : TF_Op<"InitializeTableV2", []> {
6120  let summary = [{
6121Table initializer that takes two tensors for keys and values respectively.
6122  }];
6123
6124  let arguments = (ins
6125    Arg<TF_ResourceTensor, [{Handle to a table which will be initialized.}], [TF_LookupTableWrite]>:$table_handle,
6126    Arg<TF_Tensor, [{Keys of type Tkey.}]>:$keys,
6127    Arg<TF_Tensor, [{Values of type Tval.}]>:$values
6128  );
6129
6130  let results = (outs);
6131
6132  TF_DerivedOperandTypeAttr Tkey = TF_DerivedOperandTypeAttr<1>;
6133  TF_DerivedOperandTypeAttr Tval = TF_DerivedOperandTypeAttr<2>;
6134}
6135
6136def TF_InplaceAddOp : TF_Op<"InplaceAdd", [NoSideEffect, TF_AllTypesMatch<["x", "y"]>]> {
6137  let summary = "Adds v into specified rows of x.";
6138
6139  let description = [{
Computes `y = x; y[i, :] += v; return y`.
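
A sketch via `tf.raw_ops.InplaceAdd` (illustrative; not part of the generated
api-def):

```python
import tensorflow as tf

x = tf.zeros([3, 2])
i = tf.constant([0, 2], dtype=tf.int32)
v = tf.ones([2, 2])
tf.raw_ops.InplaceAdd(x=x, i=i, v=v)
# [[1., 1.],
#  [0., 0.],
#  [1., 1.]]
```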
6141  }];
6142
6143  let arguments = (ins
6144    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$x,
6145    Arg<TF_Int32Tensor, [{A vector. Indices into the left-most dimension of `x`.}]>:$i,
6146    Arg<TF_Tensor, [{A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.}]>:$v
6147  );
6148
6149  let results = (outs
6150    Res<TF_Tensor, [{A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.}]>:$y
6151  );
6152
6153  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6154}
6155
6156def TF_InplaceUpdateOp : TF_Op<"InplaceUpdate", [NoSideEffect]> {
6157  let summary = "Updates specified rows 'i' with values 'v'.";
6158
6159  let description = [{
6160Computes `x[i, :] = v; return x`.
6161
Originally this function was mutative; however, for compilation we make this
operation create and operate on a copy of `x`.
6164  }];
6165
6166  let arguments = (ins
6167    Arg<TF_Tensor, [{A tensor of type `T`.}]>:$x,
6168    Arg<TF_Int32Tensor, [{A vector. Indices into the left-most dimension of `x`.}]>:$i,
6169    Arg<TF_Tensor, [{A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.}]>:$v
6170  );
6171
6172  let results = (outs
6173    Res<TF_Tensor, [{A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.}]>:$y
6174  );
6175
6176  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6177}
6178
6179def TF_InvOp : TF_Op<"Inv", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
6180  let summary = "Computes the reciprocal of x element-wise.";
6181
6182  let description = [{
6183I.e., \\(y = 1 / x\\).
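
The same computation is exposed publicly as `tf.math.reciprocal` (which lowers
to the equivalent `Reciprocal` op); a minimal sketch:

```python
import tensorflow as tf

tf.math.reciprocal(tf.constant([1.0, 2.0, 4.0]))  # [1., 0.5, 0.25]
```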
6184  }];
6185
6186  let arguments = (ins
6187    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
6188  );
6189
6190  let results = (outs
6191    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
6192  );
6193
6194  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6195}
6196
6197def TF_InvertOp : TF_Op<"Invert", [Involution, NoSideEffect, SameOperandsAndResultType]> {
6198  let summary = [{
6199Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010.
6200  }];
6201
6202  let description = [{
6203Flip each bit of supported types.  For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101.
6204This operation is performed on each element of the tensor argument `x`.
6205
6206Example:
6207```python
6208import tensorflow as tf
6209from tensorflow.python.ops import bitwise_ops
6210
6211# flip 2 (00000010) to -3 (11111101)
6212tf.assert_equal(-3, bitwise_ops.invert(2))
6213
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]
6216
6217inputs = [0, 5, 3, 14]
6218for dtype in dtype_list:
6219  # Because of issues with negative numbers, let's test this indirectly.
6220  # 1. invert(a) and a = 0
6221  # 2. invert(a) or a = invert(0)
6222  input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)
6223  not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and(
6224                                      input_tensor, bitwise_ops.invert(input_tensor)),
6225                                    bitwise_ops.bitwise_or(
6226                                      input_tensor, bitwise_ops.invert(input_tensor)),
6227                                    bitwise_ops.invert(
6228                                      tf.constant(0, dtype=dtype))]
6229
6230  expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)
6231  tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)
6232
6233  expected = tf.cast([not_0] * 4, tf.float32)
6234  tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)
6235
6236  # For unsigned dtypes let's also check the result directly.
6237  if dtype.is_unsigned:
6238    inverted = bitwise_ops.invert(input_tensor)
6239    expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)
6240    tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))
6241```
6242  }];
6243
6244  let arguments = (ins
6245    TF_IntTensor:$x
6246  );
6247
6248  let results = (outs
6249    TF_IntTensor:$y
6250  );
6251
6252  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6253}
6254
6255def TF_InvertPermutationOp : TF_Op<"InvertPermutation", [NoSideEffect]> {
6256  let summary = "Computes the inverse permutation of a tensor.";
6257
6258  let description = [{
6259This operation computes the inverse of an index permutation. It takes a 1-D
6260integer tensor `x`, which represents the indices of a zero-based array, and
6261swaps each value with its index position. In other words, for an output tensor
6262`y` and an input tensor `x`, this operation computes the following:
6263
6264`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
6265
6266The values must include 0. There can be no duplicate values or negative values.
6267
6268For example:
6269
6270```
6271# tensor `x` is [3, 4, 0, 2, 1]
6272invert_permutation(x) ==> [2, 4, 3, 0, 1]
6273```
6274  }];
6275
6276  let arguments = (ins
6277    Arg<TF_I32OrI64Tensor, [{1-D.}]>:$x
6278  );
6279
6280  let results = (outs
6281    Res<TF_I32OrI64Tensor, [{1-D.}]>:$y
6282  );
6283
6284  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6285
6286  let verifier = [{
6287    return Verify(*this);
6288  }];
6289}
6290
6291def TF_IsFiniteOp : TF_Op<"IsFinite", [NoSideEffect, SameOperandsAndResultShape]> {
6292  let summary = "Returns which elements of x are finite.";
6293
6294  let description = [{
6295@compatibility(numpy)
6296Equivalent to np.isfinite
6297@end_compatibility
6298
6299Example:
6300
6301```python
6302x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan])
6303tf.math.is_finite(x) ==> [True, True, True, False, False]
6304```
6305  }];
6306
6307  let arguments = (ins
6308    TF_FloatTensor:$x
6309  );
6310
6311  let results = (outs
6312    TF_BoolTensor:$y
6313  );
6314
6315  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6316}
6317
6318def TF_IsInfOp : TF_Op<"IsInf", [NoSideEffect, SameOperandsAndResultShape]> {
6319  let summary = "Returns which elements of x are Inf.";
6320
6321  let description = [{
6322@compatibility(numpy)
6323Equivalent to np.isinf
6324@end_compatibility
6325
6326Example:
6327
6328```python
6329x = tf.constant([5.0, np.inf, 6.8, np.inf])
6330tf.math.is_inf(x) ==> [False, True, False, True]
6331```
6332  }];
6333
6334  let arguments = (ins
6335    TF_FloatTensor:$x
6336  );
6337
6338  let results = (outs
6339    TF_BoolTensor:$y
6340  );
6341
6342  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6343}
6344
6345def TF_IsNanOp : TF_Op<"IsNan", [NoSideEffect, SameOperandsAndResultShape]> {
6346  let summary = "Returns which elements of x are NaN.";
6347
6348  let description = [{
6349@compatibility(numpy)
6350Equivalent to np.isnan
6351@end_compatibility
6352
6353Example:
6354
6355```python
6356x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf])
6357tf.math.is_nan(x) ==> [False, True, False, True, False]
6358```
6359  }];
6360
6361  let arguments = (ins
6362    TF_FloatTensor:$x
6363  );
6364
6365  let results = (outs
6366    TF_BoolTensor:$y
6367  );
6368
6369  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6370}
6371
6372def TF_IteratorOp : TF_Op<"Iterator", []> {
6373  let summary = "A container for an iterator resource.";
6374
6375  let arguments = (ins
6376    StrAttr:$shared_name,
6377    StrAttr:$container,
6378    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
6379    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
6380  );
6381
6382  let results = (outs
6383    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator"
6384or "IteratorGetNext" op.}], [TF_DatasetIteratorAlloc]>:$handle
6385  );
6386}
6387
6388def TF_IteratorFromStringHandleOp : TF_Op<"IteratorFromStringHandle", []> {
6389  let summary = [{
Converts the given string representation of an iterator handle into a resource.
6391  }];
6392
6393  let arguments = (ins
6394    Arg<TF_StrTensor, [{A string representation of the given handle.}]>:$string_handle,
6395
6396    DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
6397    DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
6398  );
6399
6400  let results = (outs
6401    Res<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorAlloc]>:$resource_handle
6402  );
6403}
6404
6405def TF_IteratorFromStringHandleV2Op : TF_Op<"IteratorFromStringHandleV2", []> {
6406  let summary = "";
6407
6408  let arguments = (ins
6409    TF_StrTensor:$string_handle,
6410
6411    DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
6412    DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
6413  );
6414
6415  let results = (outs
6416    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$resource_handle
6417  );
6418}
6419
6420def TF_IteratorGetNextOp : TF_Op<"IteratorGetNext", []> {
6421  let summary = "Gets the next output from the given iterator .";
6422
6423  let arguments = (ins
6424    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator
6425  );
6426
6427  let results = (outs
6428    Variadic<TF_Tensor>:$components
6429  );
6430
6431  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
6432  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
6433}
6434
6435def TF_IteratorGetNextAsOptionalOp : TF_Op<"IteratorGetNextAsOptional", []> {
6436  let summary = [{
6437Gets the next output from the given iterator as an Optional variant.
6438  }];
6439
6440  let arguments = (ins
6441    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator,
6442
6443    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
6444    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
6445  );
6446
6447  let results = (outs
6448    TF_VariantTensor:$optional
6449  );
6450}
6451
6452def TF_IteratorGetNextSyncOp : TF_Op<"IteratorGetNextSync", []> {
6453  let summary = "Gets the next output from the given iterator.";
6454
6455  let description = [{
This operation is a synchronous version of IteratorGetNext. It should only be used
6457in situations where the iterator does not block the calling thread, or where
6458the calling thread is not a member of the thread pool used to execute parallel
6459operations (e.g. in eager mode).
6460  }];
6461
6462  let arguments = (ins
6463    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator
6464  );
6465
6466  let results = (outs
6467    Variadic<TF_Tensor>:$components
6468  );
6469
6470  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
6471  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
6472}
6473
6474def TF_IteratorToStringHandleOp : TF_Op<"IteratorToStringHandle", []> {
6475  let summary = [{
6476Converts the given `resource_handle` representing an iterator to a string.
6477  }];
6478
6479  let arguments = (ins
6480    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorRead]>:$resource_handle
6481  );
6482
6483  let results = (outs
6484    Res<TF_StrTensor, [{A string representation of the given handle.}]>:$string_handle
6485  );
6486}
6487
6488def TF_IteratorV2Op : TF_Op<"IteratorV2", []> {
6489  let summary = "";
6490
6491  let arguments = (ins
6492    StrAttr:$shared_name,
6493    StrAttr:$container,
6494    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
6495    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
6496  );
6497
6498  let results = (outs
6499    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
6500  );
6501}
6502
6503def TF_KthOrderStatisticOp : TF_Op<"KthOrderStatistic", [NoSideEffect]> {
6504  let summary = "Computes the Kth order statistic of a data set. The current";
6505
6506  let description = [{
6507implementation uses a binary search requiring exactly 32 passes over
6508the input data. The running time is linear with respect to input
6509size. The median-of-medians algorithm is probably faster, but is
6510difficult to implement efficiently in XLA. The implementation imposes
6511a total ordering on floats. The ordering is consistent with the usual
6512partial order.  Positive NaNs are greater than positive
6513infinity. Negative NaNs are less than negative infinity. NaNs with
6514distinct payloads are treated as distinct. Subnormal numbers are
6515preserved (not flushed to zero). Positive infinity is greater than all
6516numbers. Negative infinity is less than all numbers. Positive is
6517greater than negative zero. There are less than k values greater than
6518the kth order statistic. There are at least k values greater than or
6519equal to the Kth order statistic. The semantics are not the same as
6520top_k_unique.
6521  }];
6522
6523  let arguments = (ins
6524    TF_Float32Tensor:$input,
6525
6526    I64Attr:$k
6527  );
6528
6529  let results = (outs
6530    TF_Float32Tensor:$output
6531  );
6532}
6533
6534def TF_L2LossOp : TF_Op<"L2Loss", [NoSideEffect]> {
6535  let summary = "L2 Loss.";
6536
6537  let description = [{
6538Computes half the L2 norm of a tensor without the `sqrt`:
6539
6540    output = sum(t ** 2) / 2
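
A usage sketch via the public `tf.nn.l2_loss` wrapper (illustrative):

```python
import tensorflow as tf

t = tf.constant([1.0, 2.0])
tf.nn.l2_loss(t)  # (1 + 4) / 2 = 2.5
```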
6541  }];
6542
6543  let arguments = (ins
6544    Arg<TF_FloatTensor, [{Typically 2-D, but may have any dimensions.}]>:$t
6545  );
6546
6547  let results = (outs
6548    Res<TF_FloatTensor, [{0-D.}]>:$output
6549  );
6550
6551  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6552}
6553
6554def TF_LRNOp : TF_Op<"LRN", [NoSideEffect]> {
6555  let summary = "Local Response Normalization.";
6556
6557  let description = [{
6558The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
6559dimension), and each vector is normalized independently.  Within a given vector,
6560each component is divided by the weighted, squared sum of inputs within
6561`depth_radius`.  In detail,
6562
6563    sqr_sum[a, b, c, d] =
6564        sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
6565    output = input / (bias + alpha * sqr_sum) ** beta
6566
6567For details, see [Krizhevsky et al., ImageNet classification with deep
6568convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
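
A shape-level sketch via the public `tf.nn.local_response_normalization`
wrapper (illustrative; the parameter defaults below mirror this op's
attributes):

```python
import tensorflow as tf

x = tf.random.normal([1, 4, 4, 8])  # NHWC
y = tf.nn.local_response_normalization(
    x, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5)
# Each element is input / (bias + alpha * sqr_sum) ** beta, with sqr_sum
# taken over a window of 2 * depth_radius + 1 channels.
```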
6569  }];
6570
6571  let arguments = (ins
6572    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D.}]>:$input,
6573
6574    DefaultValuedAttr<I64Attr, "5">:$depth_radius,
6575    DefaultValuedAttr<F32Attr, "1.0f">:$bias,
6576    DefaultValuedAttr<F32Attr, "1.0f">:$alpha,
6577    DefaultValuedAttr<F32Attr, "0.5f">:$beta
6578  );
6579
6580  let results = (outs
6581    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$output
6582  );
6583
6584  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6585}
6586
6587def TF_LRNGradOp : TF_Op<"LRNGrad", [NoSideEffect]> {
6588  let summary = "Gradients for Local Response Normalization.";
6589
6590  let arguments = (ins
6591    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$input_grads,
6592    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$input_image,
6593    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$output_image,
6594
6595    DefaultValuedAttr<I64Attr, "5">:$depth_radius,
6596    DefaultValuedAttr<F32Attr, "1.0f">:$bias,
6597    DefaultValuedAttr<F32Attr, "1.0f">:$alpha,
6598    DefaultValuedAttr<F32Attr, "0.5f">:$beta
6599  );
6600
6601  let results = (outs
6602    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The gradients for LRN.}]>:$output
6603  );
6604
6605  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6606}
6607
6608def TF_LeakyReluOp : TF_Op<"LeakyRelu", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
6609  let summary = "Computes rectified linear: `max(features, features * alpha)`.";
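
  let description = [{
A usage sketch via the public `tf.nn.leaky_relu` wrapper (illustrative; its
default `alpha` is 0.2, matching this op's default):

```python
import tensorflow as tf

tf.nn.leaky_relu(tf.constant([-1.0, 0.0, 2.0]))  # [-0.2, 0., 2.]
```
  }];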
6610
6611  let arguments = (ins
6612    TF_FloatTensor:$features,
6613
6614    DefaultValuedAttr<F32Attr, "0.2f">:$alpha
6615  );
6616
6617  let results = (outs
6618    TF_FloatTensor:$activations
6619  );
6620
6621  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6622
6623  let hasFolder = 1;
6624}
6625
6626def TF_LeakyReluGradOp : TF_Op<"LeakyReluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
6627  let summary = [{
6628Computes rectified linear gradients for a LeakyRelu operation.
6629  }];
6630
6631  let arguments = (ins
6632    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding LeakyRelu operation.}]>:$gradients,
6633    Arg<TF_FloatTensor, [{The features passed as input to the corresponding LeakyRelu operation,
6634OR the outputs of that operation (both work equivalently).}]>:$features,
6635
6636    DefaultValuedAttr<F32Attr, "0.2f">:$alpha
6637  );
6638
6639  let results = (outs
6640    Res<TF_FloatTensor, [{`gradients * (features > 0) + alpha * gradients * (features <= 0)`.}]>:$backprops
6641  );
6642
6643  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6644}
6645
6646def TF_LeftShiftOp : TF_Op<"LeftShift", [NoSideEffect, ResultsBroadcastableShape]>,
6647                     WithBroadcastableBinOpBuilder {
6648  let summary = "Elementwise computes the bitwise left-shift of `x` and `y`.";
6649
6650  let description = [{
If `y` is negative, or greater than or equal to the width of `x` in bits, the
result is implementation defined.
6653
6654Example:
6655
6656```python
6657import tensorflow as tf
6658from tensorflow.python.ops import bitwise_ops
6659import numpy as np
6660dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
6661
6662for dtype in dtype_list:
6663  lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
6664  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
6665
6666  left_shift_result = bitwise_ops.left_shift(lhs, rhs)
6667
6668  print(left_shift_result)
6669
6670# This will print:
6671# tf.Tensor([ -32   -5 -128    0], shape=(4,), dtype=int8)
6672# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int16)
6673# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int32)
6674# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int64)
6675
6676lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
6677rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
6678bitwise_ops.left_shift(lhs, rhs)
6679# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
6680```
6681  }];
6682
6683  let arguments = (ins
6684    TF_IntTensor:$x,
6685    TF_IntTensor:$y
6686  );
6687
6688  let results = (outs
6689    TF_IntTensor:$z
6690  );
6691
6692  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6693}
6694
6695def TF_LessOp : TF_Op<"Less", [NoSideEffect, ResultsBroadcastableShape]>,
6696                WithBroadcastableCmpOpBuilder {
6697  let summary = "Returns the truth value of (x < y) element-wise.";
6698
6699  let description = [{
6700*NOTE*: `Less` supports broadcasting. More about broadcasting
6701[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
6702
6703Example:
6704
6705```python
6706x = tf.constant([5, 4, 6])
6707y = tf.constant([5])
6708tf.math.less(x, y) ==> [False, True, False]
6709
6710x = tf.constant([5, 4, 6])
6711y = tf.constant([5, 6, 7])
6712tf.math.less(x, y) ==> [False, True, True]
6713```
6714  }];
6715
6716  let arguments = (ins
6717    TF_IntOrFpTensor:$x,
6718    TF_IntOrFpTensor:$y
6719  );
6720
6721  let results = (outs
6722    TF_BoolTensor:$z
6723  );
6724
6725  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6726}
6727
6728def TF_LessEqualOp : TF_Op<"LessEqual", [NoSideEffect, ResultsBroadcastableShape]>,
6729                     WithBroadcastableCmpOpBuilder {
6730  let summary = "Returns the truth value of (x <= y) element-wise.";
6731
6732  let description = [{
6733*NOTE*: `LessEqual` supports broadcasting. More about broadcasting
6734[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
6735
6736Example:
6737
6738```python
6739x = tf.constant([5, 4, 6])
6740y = tf.constant([5])
6741tf.math.less_equal(x, y) ==> [True, True, False]
6742
6743x = tf.constant([5, 4, 6])
6744y = tf.constant([5, 6, 6])
6745tf.math.less_equal(x, y) ==> [True, True, True]
6746```
6747  }];
6748
6749  let arguments = (ins
6750    TF_IntOrFpTensor:$x,
6751    TF_IntOrFpTensor:$y
6752  );
6753
6754  let results = (outs
6755    TF_BoolTensor:$z
6756  );
6757
6758  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6759}
6760
6761def TF_LgammaOp : TF_Op<"Lgamma", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
6762  let summary = [{
6763Computes the log of the absolute value of `Gamma(x)` element-wise.
6764  }];
6765
6766  let description = [{
For positive integers, this function computes log((input - 1)!) for every element in the tensor.
  `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539`
6769
6770Example:
6771
6772```python
6773x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6])
6774tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]
6775```
6776  }];
6777
6778  let arguments = (ins
6779    TF_FloatTensor:$x
6780  );
6781
6782  let results = (outs
6783    TF_FloatTensor:$y
6784  );
6785
6786  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6787}
6788
6789def TF_LinSpaceOp : TF_Op<"LinSpace", [NoSideEffect]> {
6790  let summary = "Generates values in an interval.";
6791
6792  let description = [{
6793A sequence of `num` evenly-spaced values are generated beginning at `start`.
If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
6795so that the last one is exactly `stop`.
6796
6797For example:
6798
6799```
6800tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
6801```
6802  }];
6803
6804  let arguments = (ins
6805    Arg<TF_FloatTensor, [{0-D tensor. First entry in the range.}]>:$start,
6806    Arg<TF_FloatTensor, [{0-D tensor. Last entry in the range.}]>:$stop,
6807    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of values to generate.}]>:$num
6808  );
6809
6810  let results = (outs
6811    Res<TF_FloatTensor, [{1-D. The generated values.}]>:$output
6812  );
6813
6814  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6815  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<2>;
6816}
6817
6818def TF_ListDiffOp : TF_Op<"ListDiff", [NoSideEffect]> {
6819  let summary = [{
6820Computes the difference between two lists of numbers or strings.
6821  }];
6822
6823  let description = [{
6824Given a list `x` and a list `y`, this operation returns a list `out` that
6825represents all values that are in `x` but not in `y`. The returned list `out`
6826is sorted in the same order that the numbers appear in `x` (duplicates are
6827preserved). This operation also returns a list `idx` that represents the
6828position of each `out` element in `x`. In other words:
6829
6830`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
6831
6832For example, given this input:
6833
6834```
6835x = [1, 2, 3, 4, 5, 6]
6836y = [1, 3, 5]
6837```
6838
6839This operation would return:
6840
6841```
6842out ==> [2, 4, 6]
6843idx ==> [1, 3, 5]
6844```
6845  }];
6846
6847  let arguments = (ins
6848    Arg<TF_Tensor, [{1-D. Values to keep.}]>:$x,
6849    Arg<TF_Tensor, [{1-D. Values to remove.}]>:$y
6850  );
6851
6852  let results = (outs
6853    Res<TF_Tensor, [{1-D. Values present in `x` but not in `y`.}]>:$out,
6854    Res<TF_I32OrI64Tensor, [{1-D. Positions of `x` values preserved in `out`.}]>:$idx
6855  );
6856
6857  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6858  TF_DerivedResultTypeAttr out_idx = TF_DerivedResultTypeAttr<1>;
6859}
6860
6861def TF_LoadTPUEmbeddingADAMParametersOp : TF_Op<"LoadTPUEmbeddingADAMParameters", [TF_TPUEmbeddingSideEffect]> {
6862  let summary = "Load ADAM embedding parameters.";
6863
6864  let description = [{
6865An op that loads optimization parameters into HBM for embedding. Must be
6866preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6867embedding table configuration. For example, this op is used to install
6868parameters that are loaded from a checkpoint before a training loop is
6869executed.
6870  }];
6871
6872  let arguments = (ins
6873    Arg<TF_Float32Tensor, [{Value of parameters used in the ADAM optimization algorithm.}]>:$parameters,
6874    Arg<TF_Float32Tensor, [{Value of momenta used in the ADAM optimization algorithm.}]>:$momenta,
6875    Arg<TF_Float32Tensor, [{Value of velocities used in the ADAM optimization algorithm.}]>:$velocities,
6876
6877    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6878    DefaultValuedAttr<StrAttr, "">:$table_name,
6879    I64Attr:$num_shards,
6880    I64Attr:$shard_id,
6881    DefaultValuedAttr<StrAttr, "">:$config
6882  );
6883
6884  let results = (outs);
6885}
6886
6887def TF_LoadTPUEmbeddingADAMParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingADAMParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6888  let summary = "";
6889
6890  let arguments = (ins
6891    TF_Float32Tensor:$parameters,
6892    TF_Float32Tensor:$momenta,
6893    TF_Float32Tensor:$velocities,
6894    TF_Float32Tensor:$gradient_accumulators,
6895
6896    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6897    DefaultValuedAttr<StrAttr, "">:$table_name,
6898    I64Attr:$num_shards,
6899    I64Attr:$shard_id,
6900    DefaultValuedAttr<StrAttr, "">:$config
6901  );
6902
6903  let results = (outs);
6904}
6905
6906def TF_LoadTPUEmbeddingAdadeltaParametersOp : TF_Op<"LoadTPUEmbeddingAdadeltaParameters", [TF_TPUEmbeddingSideEffect]> {
6907  let summary = "Load Adadelta embedding parameters.";
6908
6909  let description = [{
6910An op that loads optimization parameters into HBM for embedding. Must be
6911preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6912embedding table configuration. For example, this op is used to install
6913parameters that are loaded from a checkpoint before a training loop is
6914executed.
6915  }];
6916
6917  let arguments = (ins
6918    Arg<TF_Float32Tensor, [{Value of parameters used in the Adadelta optimization algorithm.}]>:$parameters,
6919    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adadelta optimization algorithm.}]>:$accumulators,
6920    Arg<TF_Float32Tensor, [{Value of updates used in the Adadelta optimization algorithm.}]>:$updates,
6921
6922    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6923    DefaultValuedAttr<StrAttr, "">:$table_name,
6924    I64Attr:$num_shards,
6925    I64Attr:$shard_id,
6926    DefaultValuedAttr<StrAttr, "">:$config
6927  );
6928
6929  let results = (outs);
6930}
6931
6932def TF_LoadTPUEmbeddingAdadeltaParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingAdadeltaParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6933  let summary = "";
6934
6935  let arguments = (ins
6936    TF_Float32Tensor:$parameters,
6937    TF_Float32Tensor:$accumulators,
6938    TF_Float32Tensor:$updates,
6939    TF_Float32Tensor:$gradient_accumulators,
6940
6941    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6942    DefaultValuedAttr<StrAttr, "">:$table_name,
6943    I64Attr:$num_shards,
6944    I64Attr:$shard_id,
6945    DefaultValuedAttr<StrAttr, "">:$config
6946  );
6947
6948  let results = (outs);
6949}
6950
6951def TF_LoadTPUEmbeddingAdagradParametersOp : TF_Op<"LoadTPUEmbeddingAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
6952  let summary = "Load Adagrad embedding parameters.";
6953
6954  let description = [{
6955An op that loads optimization parameters into HBM for embedding. Must be
6956preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6957embedding table configuration. For example, this op is used to install
6958parameters that are loaded from a checkpoint before a training loop is
6959executed.
6960  }];
6961
6962  let arguments = (ins
6963    Arg<TF_Float32Tensor, [{Value of parameters used in the Adagrad optimization algorithm.}]>:$parameters,
6964    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adagrad optimization algorithm.}]>:$accumulators,
6965
6966    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6967    DefaultValuedAttr<StrAttr, "">:$table_name,
6968    I64Attr:$num_shards,
6969    I64Attr:$shard_id,
6970    DefaultValuedAttr<StrAttr, "">:$config
6971  );
6972
6973  let results = (outs);
6974}
6975
6976def TF_LoadTPUEmbeddingAdagradParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6977  let summary = "";
6978
6979  let arguments = (ins
6980    TF_Float32Tensor:$parameters,
6981    TF_Float32Tensor:$accumulators,
6982    TF_Float32Tensor:$gradient_accumulators,
6983
6984    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6985    DefaultValuedAttr<StrAttr, "">:$table_name,
6986    I64Attr:$num_shards,
6987    I64Attr:$shard_id,
6988    DefaultValuedAttr<StrAttr, "">:$config
6989  );
6990
6991  let results = (outs);
6992}
6993
6994def TF_LoadTPUEmbeddingCenteredRMSPropParametersOp : TF_Op<"LoadTPUEmbeddingCenteredRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
6995  let summary = "Load centered RMSProp embedding parameters.";
6996
6997  let description = [{
6998An op that loads optimization parameters into HBM for embedding. Must be
6999preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
7000embedding table configuration. For example, this op is used to install
7001parameters that are loaded from a checkpoint before a training loop is
7002executed.
7003  }];
7004
7005  let arguments = (ins
7006    Arg<TF_Float32Tensor, [{Value of parameters used in the centered RMSProp optimization algorithm.}]>:$parameters,
7007    Arg<TF_Float32Tensor, [{Value of ms used in the centered RMSProp optimization algorithm.}]>:$ms,
7008    Arg<TF_Float32Tensor, [{Value of mom used in the centered RMSProp optimization algorithm.}]>:$mom,
7009    Arg<TF_Float32Tensor, [{Value of mg used in the centered RMSProp optimization algorithm.}]>:$mg,
7010
7011    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7012    DefaultValuedAttr<StrAttr, "">:$table_name,
7013    I64Attr:$num_shards,
7014    I64Attr:$shard_id,
7015    DefaultValuedAttr<StrAttr, "">:$config
7016  );
7017
7018  let results = (outs);
7019}
7020
7021def TF_LoadTPUEmbeddingFTRLParametersOp : TF_Op<"LoadTPUEmbeddingFTRLParameters", [TF_TPUEmbeddingSideEffect]> {
7022  let summary = "Load FTRL embedding parameters.";
7023
7024  let description = [{
7025An op that loads optimization parameters into HBM for embedding. Must be
7026preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
7027embedding table configuration. For example, this op is used to install
7028parameters that are loaded from a checkpoint before a training loop is
7029executed.
7030  }];
7031
7032  let arguments = (ins
7033    Arg<TF_Float32Tensor, [{Value of parameters used in the FTRL optimization algorithm.}]>:$parameters,
7034    Arg<TF_Float32Tensor, [{Value of accumulators used in the FTRL optimization algorithm.}]>:$accumulators,
7035    Arg<TF_Float32Tensor, [{Value of linears used in the FTRL optimization algorithm.}]>:$linears,
7036
7037    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7038    DefaultValuedAttr<StrAttr, "">:$table_name,
7039    I64Attr:$num_shards,
7040    I64Attr:$shard_id,
7041    DefaultValuedAttr<StrAttr, "">:$config
7042  );
7043
7044  let results = (outs);
7045}
7046
7047def TF_LoadTPUEmbeddingFTRLParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingFTRLParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
7048  let summary = "";
7049
7050  let arguments = (ins
7051    TF_Float32Tensor:$parameters,
7052    TF_Float32Tensor:$accumulators,
7053    TF_Float32Tensor:$linears,
7054    TF_Float32Tensor:$gradient_accumulators,
7055
7056    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7057    DefaultValuedAttr<StrAttr, "">:$table_name,
7058    I64Attr:$num_shards,
7059    I64Attr:$shard_id,
7060    DefaultValuedAttr<StrAttr, "">:$config
7061  );
7062
7063  let results = (outs);
7064}
7065
7066def TF_LoadTPUEmbeddingMDLAdagradLightParametersOp : TF_Op<"LoadTPUEmbeddingMDLAdagradLightParameters", [TF_TPUEmbeddingSideEffect]> {
7067  let summary = "Load MDL Adagrad Light embedding parameters.";
7068
7069  let description = [{
7070An op that loads optimization parameters into HBM for embedding. Must be
7071preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
7072embedding table configuration. For example, this op is used to install
7073parameters that are loaded from a checkpoint before a training loop is
7074executed.
7075  }];
7076
7077  let arguments = (ins
7078    Arg<TF_Float32Tensor, [{Value of parameters used in the MDL Adagrad Light optimization algorithm.}]>:$parameters,
7079    Arg<TF_Float32Tensor, [{Value of accumulators used in the MDL Adagrad Light optimization algorithm.}]>:$accumulators,
7080    Arg<TF_Float32Tensor, [{Value of weights used in the MDL Adagrad Light optimization algorithm.}]>:$weights,
7081    Arg<TF_Float32Tensor, [{Value of benefits used in the MDL Adagrad Light optimization algorithm.}]>:$benefits,
7082
7083    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7084    DefaultValuedAttr<StrAttr, "">:$table_name,
7085    I64Attr:$num_shards,
7086    I64Attr:$shard_id,
7087    DefaultValuedAttr<StrAttr, "">:$config
7088  );
7089
7090  let results = (outs);
7091}
7092
7093def TF_LoadTPUEmbeddingMomentumParametersOp : TF_Op<"LoadTPUEmbeddingMomentumParameters", [TF_TPUEmbeddingSideEffect]> {
7094  let summary = "Load Momentum embedding parameters.";
7095
7096  let description = [{
7097An op that loads optimization parameters into HBM for embedding. Must be
7098preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
7099embedding table configuration. For example, this op is used to install
7100parameters that are loaded from a checkpoint before a training loop is
7101executed.
7102  }];
7103
7104  let arguments = (ins
7105    Arg<TF_Float32Tensor, [{Value of parameters used in the Momentum optimization algorithm.}]>:$parameters,
7106    Arg<TF_Float32Tensor, [{Value of momenta used in the Momentum optimization algorithm.}]>:$momenta,
7107
7108    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7109    DefaultValuedAttr<StrAttr, "">:$table_name,
7110    I64Attr:$num_shards,
7111    I64Attr:$shard_id,
7112    DefaultValuedAttr<StrAttr, "">:$config
7113  );
7114
7115  let results = (outs);
7116}
7117
7118def TF_LoadTPUEmbeddingMomentumParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingMomentumParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
7119  let summary = "";
7120
7121  let arguments = (ins
7122    TF_Float32Tensor:$parameters,
7123    TF_Float32Tensor:$momenta,
7124    TF_Float32Tensor:$gradient_accumulators,
7125
7126    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7127    DefaultValuedAttr<StrAttr, "">:$table_name,
7128    I64Attr:$num_shards,
7129    I64Attr:$shard_id,
7130    DefaultValuedAttr<StrAttr, "">:$config
7131  );
7132
7133  let results = (outs);
7134}
7135
7136def TF_LoadTPUEmbeddingProximalAdagradParametersOp : TF_Op<"LoadTPUEmbeddingProximalAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
7137  let summary = "Load proximal Adagrad embedding parameters.";
7138
7139  let description = [{
7140An op that loads optimization parameters into HBM for embedding. Must be
7141preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
7142embedding table configuration. For example, this op is used to install
7143parameters that are loaded from a checkpoint before a training loop is
7144executed.
7145  }];
7146
7147  let arguments = (ins
7148    Arg<TF_Float32Tensor, [{Value of parameters used in the proximal Adagrad optimization algorithm.}]>:$parameters,
7149    Arg<TF_Float32Tensor, [{Value of accumulators used in the proximal Adagrad optimization algorithm.}]>:$accumulators,
7150
7151    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7152    DefaultValuedAttr<StrAttr, "">:$table_name,
7153    I64Attr:$num_shards,
7154    I64Attr:$shard_id,
7155    DefaultValuedAttr<StrAttr, "">:$config
7156  );
7157
7158  let results = (outs);
7159}
7160
7161def TF_LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
7162  let summary = "";
7163
7164  let arguments = (ins
7165    TF_Float32Tensor:$parameters,
7166    TF_Float32Tensor:$accumulators,
7167    TF_Float32Tensor:$gradient_accumulators,
7168
7169    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7170    DefaultValuedAttr<StrAttr, "">:$table_name,
7171    I64Attr:$num_shards,
7172    I64Attr:$shard_id,
7173    DefaultValuedAttr<StrAttr, "">:$config
7174  );
7175
7176  let results = (outs);
7177}
7178
7179def TF_LoadTPUEmbeddingProximalYogiParametersOp : TF_Op<"LoadTPUEmbeddingProximalYogiParameters", [TF_TPUEmbeddingSideEffect]> {
7180  let summary = "";
7181
7182  let arguments = (ins
7183    TF_Float32Tensor:$parameters,
7184    TF_Float32Tensor:$v,
7185    TF_Float32Tensor:$m,
7186
7187    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7188    DefaultValuedAttr<StrAttr, "">:$table_name,
7189    I64Attr:$num_shards,
7190    I64Attr:$shard_id,
7191    DefaultValuedAttr<StrAttr, "">:$config
7192  );
7193
7194  let results = (outs);
7195}
7196
7197def TF_LoadTPUEmbeddingProximalYogiParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingProximalYogiParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
7198  let summary = "";
7199
7200  let arguments = (ins
7201    TF_Float32Tensor:$parameters,
7202    TF_Float32Tensor:$v,
7203    TF_Float32Tensor:$m,
7204    TF_Float32Tensor:$gradient_accumulators,
7205
7206    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7207    DefaultValuedAttr<StrAttr, "">:$table_name,
7208    I64Attr:$num_shards,
7209    I64Attr:$shard_id,
7210    DefaultValuedAttr<StrAttr, "">:$config
7211  );
7212
7213  let results = (outs);
7214}
7215
7216def TF_LoadTPUEmbeddingRMSPropParametersOp : TF_Op<"LoadTPUEmbeddingRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
7217  let summary = "Load RMSProp embedding parameters.";
7218
7219  let description = [{
7220An op that loads optimization parameters into HBM for embedding. Must be
7221preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
7222embedding table configuration. For example, this op is used to install
7223parameters that are loaded from a checkpoint before a training loop is
7224executed.
7225  }];
7226
7227  let arguments = (ins
7228    Arg<TF_Float32Tensor, [{Value of parameters used in the RMSProp optimization algorithm.}]>:$parameters,
7229    Arg<TF_Float32Tensor, [{Value of ms used in the RMSProp optimization algorithm.}]>:$ms,
7230    Arg<TF_Float32Tensor, [{Value of mom used in the RMSProp optimization algorithm.}]>:$mom,
7231
7232    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7233    DefaultValuedAttr<StrAttr, "">:$table_name,
7234    I64Attr:$num_shards,
7235    I64Attr:$shard_id,
7236    DefaultValuedAttr<StrAttr, "">:$config
7237  );
7238
7239  let results = (outs);
7240}
7241
7242def TF_LoadTPUEmbeddingRMSPropParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingRMSPropParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
7243  let summary = "";
7244
7245  let arguments = (ins
7246    TF_Float32Tensor:$parameters,
7247    TF_Float32Tensor:$ms,
7248    TF_Float32Tensor:$mom,
7249    TF_Float32Tensor:$gradient_accumulators,
7250
7251    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7252    DefaultValuedAttr<StrAttr, "">:$table_name,
7253    I64Attr:$num_shards,
7254    I64Attr:$shard_id,
7255    DefaultValuedAttr<StrAttr, "">:$config
7256  );
7257
7258  let results = (outs);
7259}
7260
7261def TF_LoadTPUEmbeddingStochasticGradientDescentParametersOp : TF_Op<"LoadTPUEmbeddingStochasticGradientDescentParameters", [TF_TPUEmbeddingSideEffect]> {
7262  let summary = "Load SGD embedding parameters.";
7263
7264  let description = [{
7265An op that loads optimization parameters into HBM for embedding. Must be
7266preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
7267embedding table configuration. For example, this op is used to install
7268parameters that are loaded from a checkpoint before a training loop is
7269executed.
7270  }];
7271
7272  let arguments = (ins
7273    Arg<TF_Float32Tensor, [{Value of parameters used in the stochastic gradient descent optimization algorithm.}]>:$parameters,
7274
7275    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7276    DefaultValuedAttr<StrAttr, "">:$table_name,
7277    I64Attr:$num_shards,
7278    I64Attr:$shard_id,
7279    DefaultValuedAttr<StrAttr, "">:$config
7280  );
7281
7282  let results = (outs);
7283}
7284
7285def TF_LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
7286  let summary = "";
7287
7288  let arguments = (ins
7289    TF_Float32Tensor:$parameters,
7290    TF_Float32Tensor:$gradient_accumulators,
7291
7292    DefaultValuedAttr<I64Attr, "-1">:$table_id,
7293    DefaultValuedAttr<StrAttr, "">:$table_name,
7294    I64Attr:$num_shards,
7295    I64Attr:$shard_id,
7296    DefaultValuedAttr<StrAttr, "">:$config
7297  );
7298
7299  let results = (outs);
7300}
7301
7302def TF_LogOp : TF_Op<"Log", [NoSideEffect, SameOperandsAndResultType]> {
7303  let summary = "Computes natural logarithm of x element-wise.";
7304
7305  let description = [{
7306I.e., \\(y = \log_e x\\).
7307
7308Example:
7309
7310```python
7311x = tf.constant([0, 0.5, 1, 5])
7312tf.math.log(x) ==> [-inf, -0.6931472,  0. ,  1.609438]
7313```
7314  }];
7315
7316  let arguments = (ins
7317    TF_FpOrComplexTensor:$x
7318  );
7319
7320  let results = (outs
7321    TF_FpOrComplexTensor:$y
7322  );
7323
7324  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7325
7326  let hasCanonicalizer = 1;
7327}
7328
7329def TF_Log1pOp : TF_Op<"Log1p", [NoSideEffect, SameOperandsAndResultType, TF_CwiseUnary]> {
7330  let summary = "Computes natural logarithm of (1 + x) element-wise.";
7331
7332  let description = [{
7333I.e., \\(y = \log_e (1 + x)\\).
7334
7335Example:
7336
7337```python
7338x = tf.constant([0, 0.5, 1, 5])
7339tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595]
7340```
7341  }];
7342
7343  let arguments = (ins
7344    TF_FpOrComplexTensor:$x
7345  );
7346
7347  let results = (outs
7348    TF_FpOrComplexTensor:$y
7349  );
7350
7351  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7352}
7353
7354def TF_LogSoftmaxOp : TF_Op<"LogSoftmax", [NoSideEffect, SameOperandsAndResultType]> {
7355  let summary = "Computes log softmax activations.";
7356
7357  let description = [{
7358For each batch `i` and class `j` we have
7359
7360    logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
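
A usage sketch via the public `tf.nn.log_softmax` wrapper (illustrative):

```python
import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
tf.nn.log_softmax(logits)  # ~[[-2.4076, -1.4076, -0.4076]]
```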
7361  }];
7362
7363  let arguments = (ins
7364    Arg<TF_FloatTensor, [{2-D with shape `[batch_size, num_classes]`.}]>:$logits
7365  );
7366
7367  let results = (outs
7368    Res<TF_FloatTensor, [{Same shape as `logits`.}]>:$logsoftmax
7369  );
7370
7371  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7372}
7373
7374def TF_LogicalAndOp : TF_Op<"LogicalAnd", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
7375                      WithBroadcastableBinOpBuilder {
7376  let summary = "Returns the truth value of x AND y element-wise.";
7377
7378  let description = [{
7379*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
7380[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
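
A minimal sketch of the broadcasting behavior via `tf.math.logical_and`:

```python
import tensorflow as tf

x = tf.constant([True, False, True])
y = tf.constant(True)  # scalar broadcasts against x
tf.math.logical_and(x, y)  # [True, False, True]
```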
7381  }];
7382
7383  let arguments = (ins
7384    TF_BoolTensor:$x,
7385    TF_BoolTensor:$y
7386  );
7387
7388  let results = (outs
7389    TF_BoolTensor:$z
7390  );
7391}
7392
7393def TF_LogicalNotOp : TF_Op<"LogicalNot", [Involution, NoSideEffect, SameOperandsAndResultType]> {
7394  let summary = "Returns the truth value of `NOT x` element-wise.";
7395
7396  let arguments = (ins
7397    Arg<TF_BoolTensor, [{A `Tensor` of type `bool`.}]>:$x
7398  );
7399
7400  let results = (outs
7401    Res<TF_BoolTensor, [{A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.}]>:$y
7402  );
7403
7404  let hasCanonicalizer = 1;
7405}
7406
7407def TF_LogicalOrOp : TF_Op<"LogicalOr", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
7408                     WithBroadcastableBinOpBuilder {
7409  let summary = "Returns the truth value of x OR y element-wise.";
7410
7411  let description = [{
7412*NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
7413[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
7414  }];
7415
7416  let arguments = (ins
7417    TF_BoolTensor:$x,
7418    TF_BoolTensor:$y
7419  );
7420
7421  let results = (outs
7422    TF_BoolTensor:$z
7423  );
7424}
7425
7426def TF_LookupTableExportV2Op : TF_Op<"LookupTableExportV2", []> {
7427  let summary = "Outputs all keys and values in the table.";
7428
7429  let arguments = (ins
7430    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle
7431  );
7432
7433  let results = (outs
7434    Res<TF_Tensor, [{Vector of all keys present in the table.}]>:$keys,
7435    Res<TF_Tensor, [{Tensor of all values in the table. Indexed in parallel with `keys`.}]>:$values
7436  );
7437
7438  TF_DerivedResultTypeAttr Tkeys = TF_DerivedResultTypeAttr<0>;
7439  TF_DerivedResultTypeAttr Tvalues = TF_DerivedResultTypeAttr<1>;
7440}
7441
7442def TF_LookupTableFindOp : TF_Op<"LookupTableFind", []> {
7443  let summary = "Looks up keys in a table, outputs the corresponding values.";
7444
7445  let description = [{
The tensor `keys` must be of the same type as the keys of the table.
7447The output `values` is of the type of the table values.
7448
7449The scalar `default_value` is the value output for keys not present in the
7450table. It must also be of the same type as the table values.
7451  }];
7452
7453  let arguments = (ins
7454    Arg<TF_StrTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle,
7455    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
7456    TF_Tensor:$default_value
7457  );
7458
7459  let results = (outs
7460    Res<TF_Tensor, [{Same shape as `keys`.  Values found in the table, or `default_values`
7461for missing keys.}]>:$values
7462  );
7463
7464  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
7465  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<2>;
7466}
7467
7468def TF_LookupTableFindV2Op : TF_Op<"LookupTableFindV2", []> {
7469  let summary = "Looks up keys in a table, outputs the corresponding values.";
7470
7471  let description = [{
The tensor `keys` must be of the same type as the keys of the table.
7473The output `values` is of the type of the table values.
7474
7475The scalar `default_value` is the value output for keys not present in the
7476table. It must also be of the same type as the table values.
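
A sketch of the lookup semantics through the public `tf.lookup` API
(illustrative):

```python
import tensorflow as tf

table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(
        keys=tf.constant(["a", "b"]),
        values=tf.constant([0, 1], dtype=tf.int64)),
    default_value=-1)
table.lookup(tf.constant(["b", "missing"]))  # [1, -1]
```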
7477  }];
7478
7479  let arguments = (ins
7480    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle,
7481    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
7482    TF_Tensor:$default_value
7483  );
7484
7485  let results = (outs
7486    Res<TF_Tensor, [{Same shape as `keys`.  Values found in the table, or `default_values`
7487for missing keys.}]>:$values
7488  );
7489
7490  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
7491  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<2>;
7492}
7493
7494def TF_LookupTableImportV2Op : TF_Op<"LookupTableImportV2", []> {
7495  let summary = [{
7496Replaces the contents of the table with the specified keys and values.
7497  }];
7498
7499  let description = [{
7500The tensor `keys` must be of the same type as the keys of the table.
7501The tensor `values` must be of the type of the table values.
7502  }];
7503
7504  let arguments = (ins
7505    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
7506    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
7507    Arg<TF_Tensor, [{Values to associate with keys.}]>:$values
7508  );
7509
7510  let results = (outs);
7511
7512  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
7513  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<2>;
7514}
7515
7516def TF_LookupTableInsertV2Op : TF_Op<"LookupTableInsertV2", []> {
7517  let summary = "Updates the table to associate keys with values.";
7518
7519  let description = [{
7520The tensor `keys` must be of the same type as the keys of the table.
7521The tensor `values` must be of the type of the table values.
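
Through the public API an insert is expressed on a mutable table; a minimal
sketch using `tf.lookup.experimental.MutableHashTable`, which emits this op:

```python
import tensorflow as tf

table = tf.lookup.experimental.MutableHashTable(
    key_dtype=tf.string, value_dtype=tf.int64, default_value=-1)
table.insert(tf.constant(["a", "b"]), tf.constant([1, 2], dtype=tf.int64))
table.lookup(tf.constant(["b"]))  # [2]
```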
7522  }];
7523
7524  let arguments = (ins
7525    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
7526    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
7527    Arg<TF_Tensor, [{Values to associate with keys.}]>:$values
7528  );
7529
7530  let results = (outs);
7531
7532  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
7533  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<2>;
7534}
7535
7536def TF_LookupTableRemoveV2Op : TF_Op<"LookupTableRemoveV2", []> {
7537  let summary = "Removes keys and their associated values from a table.";
7538
7539  let description = [{
7540The tensor `keys` must be of the same type as the keys of the table. Keys not
7541already in the table are silently ignored.
7542  }];
7543
7544  let arguments = (ins
7545    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
7546    Arg<TF_Tensor, [{Any shape.  Keys of the elements to remove.}]>:$keys
7547  );
7548
7549  let results = (outs);
7550
7551  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
7552}
7553
7554def TF_LookupTableSizeOp : TF_Op<"LookupTableSize", []> {
7555  let summary = "Computes the number of elements in the given table.";
7556
7557  let arguments = (ins
7558    Arg<TF_StrTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle
7559  );
7560
7561  let results = (outs
7562    Res<TF_Int64Tensor, [{Scalar that contains number of elements in the table.}]>:$size
7563  );
7564}
7565
7566def TF_LookupTableSizeV2Op : TF_Op<"LookupTableSizeV2", []> {
7567  let summary = "Computes the number of elements in the given table.";
7568
7569  let arguments = (ins
7570    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle
7571  );
7572
7573  let results = (outs
7574    Res<TF_Int64Tensor, [{Scalar that contains number of elements in the table.}]>:$size
7575  );
7576}
7577
7578def TF_LowerBoundOp : TF_Op<"LowerBound", [NoSideEffect]> {
7579  let summary = [{
7580Applies lower_bound(sorted_search_values, values) along each row.
7581  }];
7582
7583  let description = [{
7584Each set of rows with the same index in (sorted_inputs, values) is treated
7585independently.  The resulting row is the equivalent of calling
7586`np.searchsorted(sorted_inputs, values, side='left')`.
7587
7588The result is not a global index to the entire
7589`Tensor`, but rather just the index in the last dimension.
7590
7591A 2-D example:
7592  sorted_sequence = [[0, 3, 9, 9, 10],
7593                     [1, 2, 3, 4, 5]]
7594  values = [[2, 4, 9],
7595            [0, 2, 6]]
7596
7597  result = LowerBound(sorted_sequence, values)
7598
7599  result == [[1, 2, 2],
7600             [0, 1, 5]]
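
The same result can be reproduced row by row with NumPy (a sketch, assuming the
2-D inputs above):

```python
import numpy as np

sorted_inputs = np.array([[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]])
values = np.array([[2, 4, 9], [0, 2, 6]])
result = np.stack([np.searchsorted(row, v, side='left')
                   for row, v in zip(sorted_inputs, values)])
# result == [[1, 2, 2], [0, 1, 5]]
```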
7601  }];
7602
7603  let arguments = (ins
7604    Arg<TF_Tensor, [{2-D Tensor where each row is ordered.}]>:$sorted_inputs,
7605    Arg<TF_Tensor, [{2-D Tensor with the same number of rows as `sorted_inputs`. Contains
7606the values that will be searched for in `sorted_inputs`.}]>:$values
7607  );
7608
7609  let results = (outs
7610    Res<TF_I32OrI64Tensor, [{A `Tensor` with the same shape as `values`.  It contains the first scalar index
7611into the last dimension where values can be inserted without changing the
7612ordered property.}]>:$output
7613  );
7614
7615  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7616  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
7617}
7618
7619def TF_MakeIteratorOp : TF_Op<"MakeIterator", []> {
7620  let summary = [{
7621Makes a new iterator from the given `dataset` and stores it in `iterator`.
7622  }];
7623
7624  let description = [{
7625This operation may be executed multiple times. Each execution will reset the
7626iterator in `iterator` to the first element of `dataset`.
7627  }];
7628
7629  let arguments = (ins
7630    TF_VariantTensor:$dataset,
7631    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorWrite]>:$iterator
7632  );
7633
7634  let results = (outs);
7635}
7636
7637def TF_MakeUniqueOp : TF_Op<"MakeUnique", [NoSideEffect]> {
7638  let summary = [{
7639Make all elements in the non-Batch dimension unique, but \"close\" to
7640  }];
7641
7642  let description = [{
7643their initial value. Never returns a subnormal number. Never returns
7644zero. The sign of each input element is always identical to the sign
7645of the corresponding output element. Behavior for infinite and
7646subnormal elements is undefined.
7647  }];
7648
7649  let arguments = (ins
7650    TF_Float32Tensor:$input
7651  );
7652
7653  let results = (outs
7654    TF_Float32Tensor:$output
7655  );
7656}
7657
7658def TF_MapAndBatchDatasetOp : TF_Op<"MapAndBatchDataset", [NoSideEffect]> {
7659  let summary = "Creates a dataset that fuses mapping with batching.";
7660
7661  let description = [{
7662Creates a dataset that applies `f` to the outputs of `input_dataset` and then
7663batches `batch_size` of them.
7664
7665Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
7666to `batch_size * num_parallel_batches` copies of `f` in parallel.
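
In the public `tf.data` API this fusion corresponds to a `map` followed by a
`batch`; a sketch of an equivalent pipeline (the map function and sizes are
illustrative):

```python
import tensorflow as tf

ds = tf.data.Dataset.range(10)
ds = ds.map(lambda x: x * 2, num_parallel_calls=4)
ds = ds.batch(3, drop_remainder=True)
# Yields [0, 2, 4], [6, 8, 10], [12, 14, 16]; the remainder [18] is dropped.
```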
7667  }];
7668
7669  let arguments = (ins
7670    Arg<TF_VariantTensor, [{A variant tensor representing the input dataset.}]>:$input_dataset,
7671    Arg<Variadic<TF_Tensor>, [{A list of tensors, typically values that were captured when building a closure
7672for `f`.}]>:$other_arguments,
7673    Arg<TF_Int64Tensor, [{A scalar representing the number of elements to accumulate in a
7674batch. It determines the number of concurrent invocations of `f` that process
7675elements from `input_dataset` in parallel.}]>:$batch_size,
7676    Arg<TF_Int64Tensor, [{A scalar representing the maximum number of parallel invocations of the `map_fn`
7677function. Applying the `map_fn` on consecutive input elements in parallel has
7678the potential to improve input pipeline throughput.}]>:$num_parallel_calls,
7679    Arg<TF_BoolTensor, [{A scalar representing whether the last batch should be dropped in case its size
7680is smaller than desired.}]>:$drop_remainder,
7681
7682    SymbolRefAttr:$f,
7683    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
7684    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
7685    DefaultValuedAttr<BoolAttr, "false">:$preserve_cardinality
7686  );
7687
7688  let results = (outs
7689    TF_VariantTensor:$handle
7690  );
7691
7692  TF_DerivedOperandTypeListAttr Targuments = TF_DerivedOperandTypeListAttr<1>;
7693}
7694
7695def TF_MapDatasetOp : TF_Op<"MapDataset", [NoSideEffect]> {
7696  let summary = [{
7697Creates a dataset that applies `f` to the outputs of `input_dataset`.
7698  }];
7699
7700  let arguments = (ins
7701    TF_VariantTensor:$input_dataset,
7702    Variadic<TF_Tensor>:$other_arguments,
7703
7704    SymbolRefAttr:$f,
7705    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
7706    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
7707    DefaultValuedAttr<BoolAttr, "true">:$use_inter_op_parallelism,
7708    DefaultValuedAttr<BoolAttr, "false">:$preserve_cardinality
7709  );
7710
7711  let results = (outs
7712    TF_VariantTensor:$handle
7713  );
7714
7715  TF_DerivedOperandTypeListAttr Targuments = TF_DerivedOperandTypeListAttr<1>;
7716}
7717
7718def TF_MatMulOp : TF_Op<"MatMul", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
7719  let summary = [{
7720Multiply the matrix "a" by the matrix "b".
7721  }];
7722
7723  let description = [{
7724The inputs must be two-dimensional matrices and the inner dimension of
7725"a" (after being transposed if transpose_a is true) must match the
7726outer dimension of "b" (after being transposed if transpose_b is
7727true).
7728
7729*Note*: The default kernel implementation for MatMul on GPUs uses
7730cublas.
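
For example, via the Python binding `tf.linalg.matmul` (a minimal sketch):

```python
import tensorflow as tf

a = tf.constant([[1., 2.], [3., 4.]])
b = tf.constant([[5., 6.], [7., 8.]])
tf.linalg.matmul(a, b)                    # [[19., 22.], [43., 50.]]
tf.linalg.matmul(a, b, transpose_a=True)  # [[26., 30.], [38., 44.]]
```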
7731  }];
7732
7733  let arguments = (ins
7734    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$a,
7735    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$b,
7736
7737    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
7738    DefaultValuedAttr<BoolAttr, "false">:$transpose_b
7739  );
7740
7741  let results = (outs
7742    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$product
7743  );
7744
7745  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7746}
7747
7748def TF_MatrixBandPartOp : TF_Op<"MatrixBandPart", [NoSideEffect, TF_AllTypesMatch<["input", "band"]>]> {
7749  let summary = [{
7750Copy a tensor setting everything outside a central band in each innermost matrix to zero.
7751  }];
7752
7753  let description = [{
7754The `band` part is computed as follows:
7755Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
7756tensor with the same shape where
7757
7758`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
7759
7760The indicator function
7761
7762`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
7763                 (num_upper < 0 || (n-m) <= num_upper)`.
7764
7765For example:
7766
7767```
7768# if 'input' is [[ 0,  1,  2, 3]
7769#                [-1,  0,  1, 2]
7770#                [-2, -1,  0, 1]
7771#                [-3, -2, -1, 0]],
7772
7773tf.linalg.band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
7774                                       [-1,  0,  1, 2]
7775                                       [ 0, -1,  0, 1]
7776                                       [ 0,  0, -1, 0]],
7777
7778tf.linalg.band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
7779                                      [-1,  0,  1, 0]
7780                                      [-2, -1,  0, 1]
7781                                      [ 0, -2, -1, 0]]
7782```
7783
7784Useful special cases:
7785
7786```
7787 tf.linalg.band_part(input, 0, -1) ==> Upper triangular part.
7788 tf.linalg.band_part(input, -1, 0) ==> Lower triangular part.
7789 tf.linalg.band_part(input, 0, 0) ==> Diagonal.
7790```
7791  }];
7792
7793  let arguments = (ins
7794    Arg<TF_Tensor, [{Rank `k` tensor.}]>:$input,
7795    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of subdiagonals to keep. If negative, keep entire
7796lower triangle.}]>:$num_lower,
7797    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of superdiagonals to keep. If negative, keep
7798entire upper triangle.}]>:$num_upper
7799  );
7800
7801  let results = (outs
7802    Res<TF_Tensor, [{Rank `k` tensor of the same shape as input. The extracted banded tensor.}]>:$band
7803  );
7804
7805  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7806  TF_DerivedOperandTypeAttr Tindex = TF_DerivedOperandTypeAttr<1>;
7807
7808  let verifier = [{
7809    return Verify(*this);
7810  }];
7811}
7812
7813def TF_MatrixDiagOp : TF_Op<"MatrixDiag", [NoSideEffect]> {
7814  let summary = [{
7815Returns a batched diagonal tensor with given batched diagonal values.
7816  }];
7817
7818  let description = [{
7819Given a `diagonal`, this operation returns a tensor with the `diagonal` and
7820everything else padded with zeros. The diagonal is computed as follows:
7821
7822Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
7823tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
7824
7825`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
7826
7827For example:
7828
7829```
7830# 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
7831
7832and diagonal.shape = (2, 4)
7833
7834tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
7835                               [0, 2, 0, 0]
7836                               [0, 0, 3, 0]
7837                               [0, 0, 0, 4]],
7838                              [[5, 0, 0, 0]
7839                               [0, 6, 0, 0]
7840                               [0, 0, 7, 0]
7841                               [0, 0, 0, 8]]]
7842
7843which has shape (2, 4, 4)
7844```
7845  }];
7846
7847  let arguments = (ins
7848    Arg<TF_Tensor, [{Rank `k`, where `k >= 1`.}]>:$diagonal
7849  );
7850
7851  let results = (outs
7852    Res<TF_Tensor, [{Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.}]>:$output
7853  );
7854
7855  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7856}
7857
7858def TF_MatrixDiagPartV3Op : TF_Op<"MatrixDiagPartV3", [NoSideEffect]> {
7859  let summary = "Returns the batched diagonal part of a batched tensor.";
7860
7861  let description = [{
7862Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
7863`input`.
7864
7865Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
7866Let `max_diag_len` be the maximum length among all diagonals to be extracted,
7867`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
7868Let `num_diags` be the number of diagonals to extract,
7869`num_diags = k[1] - k[0] + 1`.
7870
7871If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
7872`[I, J, ..., L, max_diag_len]` and values:
7873
7874```
7875diagonal[i, j, ..., l, n]
7876  = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
7877    padding_value                 ; otherwise.
7878```
7879where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
7880
7881Otherwise, the output tensor has rank `r` with dimensions
7882`[I, J, ..., L, num_diags, max_diag_len]` with values:
7883
7884```
7885diagonal[i, j, ..., l, m, n]
7886  = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
7887    padding_value                 ; otherwise.
7888```
7889where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.
7890
7891`offset` is zero except when the alignment of the diagonal is to the right.
7892```
7893offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
7894                                           and `d >= 0`) or
7895                                         (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
7896                                           and `d <= 0`)
7897         0                          ; otherwise
7898```
7899where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
7900
7901The input must be at least a matrix.
7902
7903For example:
7904
7905```
7906input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
7907                   [5, 6, 7, 8],
7908                   [9, 8, 7, 6]],
7909                  [[5, 4, 3, 2],
7910                   [1, 2, 3, 4],
7911                   [5, 6, 7, 8]]])
7912
7913# A main diagonal from each batch.
7914tf.matrix_diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
7915                                [5, 2, 7]]
7916
7917# A superdiagonal from each batch.
7918tf.matrix_diag_part(input, k = 1)
7919  ==> [[2, 7, 6],  # Output shape: (2, 3)
7920       [4, 3, 8]]
7921
7922# A band from each batch.
7923tf.matrix_diag_part(input, k = (-1, 2))
7924  ==> [[[0, 3, 8],  # Output shape: (2, 4, 3)
7925        [2, 7, 6],
7926        [1, 6, 7],
7927        [5, 8, 0]],
7928       [[0, 3, 4],
7929        [4, 3, 8],
7930        [5, 2, 7],
7931        [1, 6, 0]]]
7932
7933# LEFT_RIGHT alignment.
7934tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT")
7935  ==> [[[3, 8, 0],  # Output shape: (2, 4, 3)
7936        [2, 7, 6],
7937        [1, 6, 7],
7938        [0, 5, 8]],
7939       [[3, 4, 0],
7940        [4, 3, 8],
7941        [5, 2, 7],
7942        [0, 1, 6]]]
7943
7944# max_diag_len can be shorter than the main diagonal.
7945tf.matrix_diag_part(input, k = (-2, -1))
7946  ==> [[[5, 8],
7947        [9, 0]],
7948       [[1, 6],
7949        [5, 0]]]
7950
7951# padding_value = 9
7952tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
7953  ==> [[[9, 9, 4],  # Output shape: (2, 3, 3)
7954        [9, 3, 8],
7955        [2, 7, 6]],
7956       [[9, 9, 2],
7957        [9, 3, 4],
7958        [4, 3, 8]]]
7959
7960```
7961  }];
7962
7963  let arguments = (ins
7964    Arg<TF_Tensor, [{Rank `r` tensor where `r >= 2`.}]>:$input,
7965    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7966diagonal, and negative value means subdiagonals. `k` can be a single integer
7967(for a single diagonal) or a pair of integers specifying the low and high ends
7968of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
7969    Arg<TF_Tensor, [{The value to fill the area outside the specified diagonal band with.
7970Default is 0.}]>:$padding_value,
7971
7972    DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
7973  );
7974
7975  let results = (outs
7976    Res<TF_Tensor, [{The extracted diagonal(s).}]>:$diagonal
7977  );
7978
7979  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7980}
7981
7982def TF_MatrixDiagV2Op : TF_Op<"MatrixDiagV2", [NoSideEffect]> {
7983  let summary = [{
7984Returns a batched diagonal tensor with given batched diagonal values.
7985  }];
7986
7987  let description = [{
7988Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
7989diagonals of a matrix, with everything else padded with `padding`. `num_rows`
7990and `num_cols` specify the dimension of the innermost matrix of the output. If
7991both are not specified, the op assumes the innermost matrix is square and infers
7992its size from `k` and the innermost dimension of `diagonal`. If only one of them
7993is specified, the op assumes the unspecified value is the smallest possible
7994based on other criteria.
7995
7996Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
7997rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
7998diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
7999`r` with shape `[I, J, ..., L, num_rows, num_cols]`.
8000
8001The second innermost dimension of `diagonal` has a double meaning.
8002When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
8003`[I, J, ..., M]`, and the output tensor is:
8004
8005```
8006output[i, j, ..., l, m, n]
8007  = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
8008    padding_value                             ; otherwise
8009```
8010
8011Otherwise, `M` is treated as the number of diagonals for the matrix in the
8012same batch (`M = k[1]-k[0]+1`), and the output tensor is:
8013
8014```
8015output[i, j, ..., l, m, n]
8016  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
8017    padding_value                                     ; otherwise
8018```
8019where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
8020
8021For example:
8022
8023```
8024# The main diagonal.
8025diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
8026                     [5, 6, 7, 8]])
8027tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
8028                               [0, 2, 0, 0],
8029                               [0, 0, 3, 0],
8030                               [0, 0, 0, 4]],
8031                              [[5, 0, 0, 0],
8032                               [0, 6, 0, 0],
8033                               [0, 0, 7, 0],
8034                               [0, 0, 0, 8]]]
8035
8036# A superdiagonal (per batch).
8037diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
8038                     [4, 5, 6]])
8039tf.matrix_diag(diagonal, k = 1)
8040  ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
8041        [0, 0, 2, 0],
8042        [0, 0, 0, 3],
8043        [0, 0, 0, 0]],
8044       [[0, 4, 0, 0],
8045        [0, 0, 5, 0],
8046        [0, 0, 0, 6],
8047        [0, 0, 0, 0]]]
8048
8049# A band of diagonals.
8050diagonals = np.array([[[1, 2, 3],  # Input shape: (2, 2, 3)
8051                       [4, 5, 0]],
8052                      [[6, 7, 9],
8053                       [9, 1, 0]]])
8054tf.matrix_diag(diagonals, k = (-1, 0))
8055  ==> [[[1, 0, 0],  # Output shape: (2, 3, 3)
8056        [4, 2, 0],
8057        [0, 5, 3]],
8058       [[6, 0, 0],
8059        [9, 7, 0],
8060        [0, 1, 9]]]
8061
8062# Rectangular matrix.
8063diagonal = np.array([1, 2])  # Input shape: (2)
8064tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
8065  ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
8066       [1, 0, 0, 0],
8067       [0, 2, 0, 0]]
8068
8069# Rectangular matrix with inferred num_cols and padding_value = 9.
8070tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
8071  ==> [[9, 9],  # Output shape: (3, 2)
8072       [1, 9],
8073       [9, 2]]
8074```
8075  }];
8076
8077  let arguments = (ins
8078    Arg<TF_Tensor, [{Rank `r`, where `r >= 1`}]>:$diagonal,
8079    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
8080diagonal, and negative value means subdiagonals. `k` can be a single integer
8081(for a single diagonal) or a pair of integers specifying the low and high ends
8082of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
8083    Arg<TF_Int32Tensor, [{The number of rows of the output matrix. If it is not provided, the op assumes
8084the output matrix is a square matrix and infers the matrix size from k and the
8085innermost dimension of `diagonal`.}]>:$num_rows,
8086    Arg<TF_Int32Tensor, [{The number of columns of the output matrix. If it is not provided, the op
8087assumes the output matrix is a square matrix and infers the matrix size from
8088k and the innermost dimension of `diagonal`.}]>:$num_cols,
8089    Arg<TF_Tensor, [{The number to fill the area outside the specified diagonal band with.
8090Default is 0.}]>:$padding_value
8091  );
8092
8093  let results = (outs
8094    Res<TF_Tensor, [{Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.}]>:$output
8095  );
8096
8097  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8098}
8099
8100def TF_MatrixDiagV3Op : TF_Op<"MatrixDiagV3", [NoSideEffect]> {
8101  let summary = [{
8102Returns a batched diagonal tensor with given batched diagonal values.
8103  }];
8104
8105  let description = [{
8106Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
8107diagonals of a matrix, with everything else padded with `padding`. `num_rows`
8108and `num_cols` specify the dimension of the innermost matrix of the output. If
8109both are not specified, the op assumes the innermost matrix is square and infers
8110its size from `k` and the innermost dimension of `diagonal`. If only one of them
8111is specified, the op assumes the unspecified value is the smallest possible
8112based on other criteria.
8113
8114Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
8115rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
8116diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
8117`r` with shape `[I, J, ..., L, num_rows, num_cols]`.
8118
8119The second innermost dimension of `diagonal` has a double meaning.
8120When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
8121`[I, J, ..., M]`, and the output tensor is:
8122
8123```
8124output[i, j, ..., l, m, n]
8125  = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
8126    padding_value                             ; otherwise
8127```
8128
8129Otherwise, `M` is treated as the number of diagonals for the matrix in the
8130same batch (`M = k[1]-k[0]+1`), and the output tensor is:
8131
8132```
8133output[i, j, ..., l, m, n]
8134  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
8135    padding_value                                     ; otherwise
8136```
8137where `d = n - m`, `diag_index = k[1] - d`, and
8138`index_in_diag = n - max(d, 0) + offset`.
8139
8140`offset` is zero except when the alignment of the diagonal is to the right.
8141```
8142offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
8143                                           and `d >= 0`) or
8144                                         (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
8145                                           and `d <= 0`)
8146         0                          ; otherwise
8147```
8148where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
8149
8150For example:
8151
8152```
8153# The main diagonal.
8154diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
8155                     [5, 6, 7, 8]])
8156tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
8157                               [0, 2, 0, 0],
8158                               [0, 0, 3, 0],
8159                               [0, 0, 0, 4]],
8160                              [[5, 0, 0, 0],
8161                               [0, 6, 0, 0],
8162                               [0, 0, 7, 0],
8163                               [0, 0, 0, 8]]]
8164
8165# A superdiagonal (per batch).
8166diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
8167                     [4, 5, 6]])
8168tf.matrix_diag(diagonal, k = 1)
8169  ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
8170        [0, 0, 2, 0],
8171        [0, 0, 0, 3],
8172        [0, 0, 0, 0]],
8173       [[0, 4, 0, 0],
8174        [0, 0, 5, 0],
8175        [0, 0, 0, 6],
8176        [0, 0, 0, 0]]]
8177
8178# A tridiagonal band (per batch).
8179diagonals = np.array([[[0, 8, 9],  # Input shape: (2, 2, 3)
8180                       [1, 2, 3],
8181                       [4, 5, 0]],
8182                      [[0, 2, 3],
8183                       [6, 7, 9],
8184                       [9, 1, 0]]])
8185tf.matrix_diag(diagonals, k = (-1, 1))
8186  ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
8187        [4, 2, 9],
8188        [0, 5, 3]],
8189       [[6, 2, 0],
8190        [9, 7, 3],
8191        [0, 1, 9]]]
8192
8193# LEFT_RIGHT alignment.
8194diagonals = np.array([[[8, 9, 0],  # Input shape: (2, 2, 3)
8195                       [1, 2, 3],
8196                       [0, 4, 5]],
8197                      [[2, 3, 0],
8198                       [6, 7, 9],
8199                       [0, 9, 1]]])
8200tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT")
8201  ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
8202        [4, 2, 9],
8203        [0, 5, 3]],
8204       [[6, 2, 0],
8205        [9, 7, 3],
8206        [0, 1, 9]]]
8207
8208# Rectangular matrix.
8209diagonal = np.array([1, 2])  # Input shape: (2)
8210tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
8211  ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
8212       [1, 0, 0, 0],
8213       [0, 2, 0, 0]]
8214
8215# Rectangular matrix with inferred num_cols and padding_value = 9.
8216tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
8217  ==> [[9, 9],  # Output shape: (3, 2)
8218       [1, 9],
8219       [9, 2]]
8220
8221```
8222  }];
8223
8224  let arguments = (ins
8225    Arg<TF_Tensor, [{Rank `r`, where `r >= 1`}]>:$diagonal,
8226    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
8227diagonal, and negative value means subdiagonals. `k` can be a single integer
8228(for a single diagonal) or a pair of integers specifying the low and high ends
8229of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
8230    Arg<TF_Int32Tensor, [{The number of rows of the output matrix. If it is not provided, the op assumes
8231the output matrix is a square matrix and infers the matrix size from k and the
8232innermost dimension of `diagonal`.}]>:$num_rows,
8233    Arg<TF_Int32Tensor, [{The number of columns of the output matrix. If it is not provided, the op
8234assumes the output matrix is a square matrix and infers the matrix size from
8235k and the innermost dimension of `diagonal`.}]>:$num_cols,
8236    Arg<TF_Tensor, [{The number to fill the area outside the specified diagonal band with.
8237Default is 0.}]>:$padding_value,
8238
8239    DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
8240  );
8241
8242  let results = (outs
8243    Res<TF_Tensor, [{Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.}]>:$output
8244  );
8245
8246  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8247}
8248
8249def TF_MatrixInverseOp : TF_Op<"MatrixInverse", [NoSideEffect]> {
8250  let summary = [{
8251Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).
8252  }];
8253
8254  let description = [{
8255The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
8256form square matrices. The output is a tensor of the same shape as the input
8257containing the inverse for all input submatrices `[..., :, :]`.
8258
8259The op uses LU decomposition with partial pivoting to compute the inverses.
8260
8261If a matrix is not invertible there is no guarantee what the op does. It
8262may detect the condition and raise an exception or it may simply return a
8263garbage result.
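
For example, via the Python binding `tf.linalg.inv` (a minimal sketch using a
diagonal matrix, whose inverse is easy to verify):

```python
import tensorflow as tf

x = tf.constant([[2., 0.], [0., 4.]])
tf.linalg.inv(x)  # [[0.5, 0.], [0., 0.25]]
```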
8264  }];
8265
8266  let arguments = (ins
8267    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$input,
8268
8269    DefaultValuedAttr<BoolAttr, "false">:$adjoint
8270  );
8271
8272  let results = (outs
8273    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.
8274
8275@compatibility(numpy)
8276Equivalent to np.linalg.inv
8277@end_compatibility}]>:$output
8278  );
8279
8280  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8281}
8282
8283def TF_MatrixSetDiagOp : TF_Op<"MatrixSetDiag", [NoSideEffect]> {
8284  let summary = [{
8285Returns a batched matrix tensor with new batched diagonal values.
8286  }];
8287
8288  let description = [{
8289Given `input` and `diagonal`, this operation returns a tensor with the
8290same shape and values as `input`, except for the main diagonal of the
8291innermost matrices.  These will be overwritten by the values in `diagonal`.
8292
8293The output is computed as follows:
8294
8295Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
8296`k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
8297tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
8298
8299  * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
8300  * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
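
For example, via the public `tf.linalg.set_diag` API (which may lower to a
newer version of this op; a minimal sketch):

```python
import tensorflow as tf

input = tf.zeros([2, 3, 3])
diagonal = tf.constant([[1., 2., 3.], [4., 5., 6.]])
tf.linalg.set_diag(input, diagonal)
# batch 0: [[1, 0, 0], [0, 2, 0], [0, 0, 3]]
# batch 1: [[4, 0, 0], [0, 5, 0], [0, 0, 6]]
```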
8301  }];
8302
8303  let arguments = (ins
8304    Arg<TF_Tensor, [{Rank `k+1`, where `k >= 1`.}]>:$input,
8305    Arg<TF_Tensor, [{Rank `k`, where `k >= 1`.}]>:$diagonal
8306  );
8307
8308  let results = (outs
8309    Res<TF_Tensor, [{Rank `k+1`, with `output.shape = input.shape`.}]>:$output
8310  );
8311
8312  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8313
8314  let hasCanonicalizer = 1;
8315}
8316
8317def TF_MatrixSetDiagV2Op : TF_Op<"MatrixSetDiagV2", [NoSideEffect]> {
8318  let summary = [{
8319Returns a batched matrix tensor with new batched diagonal values.
8320  }];
8321
8322  let description = [{
8323Given `input` and `diagonal`, this operation returns a tensor with the
8324same shape and values as `input`, except for the specified diagonals of the
8325innermost matrices. These will be overwritten by the values in `diagonal`.
8326
8327`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
8328`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
8329Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
8330`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
8331`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
8332`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
8333
8334The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
8335If `k` is scalar or `k[0] == k[1]`:
8336
8337```
8338output[i, j, ..., l, m, n]
8339  = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
8340    input[i, j, ..., l, m, n]              ; otherwise
8341```
8342
8343Otherwise,
8344
8345```
8346output[i, j, ..., l, m, n]
8347  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
8348    input[i, j, ..., l, m, n]                         ; otherwise
8349```
8350where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
8351
8352For example:
8353
8354```
8355# The main diagonal.
8356input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
8357                   [7, 7, 7, 7],
8358                   [7, 7, 7, 7]],
8359                  [[7, 7, 7, 7],
8360                   [7, 7, 7, 7],
8361                   [7, 7, 7, 7]]])
8362diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
8363                     [4, 5, 6]])
8364tf.matrix_set_diag(input, diagonal) ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
8365                                          [7, 2, 7, 7],
8366                                          [7, 7, 3, 7]],
8367                                         [[4, 7, 7, 7],
8368                                          [7, 5, 7, 7],
8369                                          [7, 7, 6, 7]]]
8370
8371# A superdiagonal (per batch).
8372tf.matrix_set_diag(input, diagonal, k = 1)
8373  ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
8374        [7, 7, 2, 7],
8375        [7, 7, 7, 3]],
8376       [[7, 4, 7, 7],
8377        [7, 7, 5, 7],
8378        [7, 7, 7, 6]]]
8379
8380# A band of diagonals.
8381diagonals = np.array([[[1, 2, 3],  # Diagonal shape: (2, 2, 3)
8382                       [4, 5, 0]],
8383                      [[6, 1, 2],
8384                       [3, 4, 0]]])
8385tf.matrix_set_diag(input, diagonals, k = (-1, 0))
8386  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
8387        [4, 2, 7, 7],
8388        [0, 5, 3, 7]],
8389       [[6, 7, 7, 7],
8390        [3, 1, 7, 7],
8391        [7, 4, 2, 7]]]
8392
8393```
8394  }];
8395
8396  let arguments = (ins
8397    Arg<TF_Tensor, [{Rank `r+1`, where `r >= 1`.}]>:$input,
8398    Arg<TF_Tensor, [{Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
8399`r >= 1`.}]>:$diagonal,
8400    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
8401diagonal, and negative value means subdiagonals. `k` can be a single integer
8402(for a single diagonal) or a pair of integers specifying the low and high ends
8403of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k
8404  );
8405
8406  let results = (outs
8407    Res<TF_Tensor, [{Rank `r+1`, with `output.shape = input.shape`.}]>:$output
8408  );
8409
8410  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8411
8412  let hasCanonicalizer = 1;
8413}
8414
8415def TF_MatrixSetDiagV3Op : TF_Op<"MatrixSetDiagV3", [NoSideEffect]> {
8416  let summary = [{
8417Returns a batched matrix tensor with new batched diagonal values.
8418  }];
8419
8420  let description = [{
8421Given `input` and `diagonal`, this operation returns a tensor with the
8422same shape and values as `input`, except for the specified diagonals of the
8423innermost matrices. These will be overwritten by the values in `diagonal`.
8424
8425`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
8426`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
8427Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
8428`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
8429`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
8430`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
8431
8432The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
8433If `k` is scalar or `k[0] == k[1]`:
8434
8435```
8436output[i, j, ..., l, m, n]
8437  = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
8438    input[i, j, ..., l, m, n]              ; otherwise
8439```
8440
8441Otherwise,
8442
8443```
8444output[i, j, ..., l, m, n]
8445  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
8446    input[i, j, ..., l, m, n]                         ; otherwise
8447```
8448where `d = n - m`, `diag_index = k[1] - d`, and
8449`index_in_diag = n - max(d, 0) + offset`.
8450
8451`offset` is zero except when the alignment of the diagonal is to the right.
8452```
8453offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
8454                                           and `d >= 0`) or
8455                                         (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
8456                                           and `d <= 0`)
8457         0                          ; otherwise
8458```
8459where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
8460
8461For example:
8462
8463```
8464# The main diagonal.
8465input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
8466                   [7, 7, 7, 7],
8467                   [7, 7, 7, 7]],
8468                  [[7, 7, 7, 7],
8469                   [7, 7, 7, 7],
8470                   [7, 7, 7, 7]]])
8471diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
8472                     [4, 5, 6]])
8473tf.matrix_set_diag(input, diagonal)
8474  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
8475        [7, 2, 7, 7],
8476        [7, 7, 3, 7]],
8477       [[4, 7, 7, 7],
8478        [7, 5, 7, 7],
8479        [7, 7, 6, 7]]]
8480
8481# A superdiagonal (per batch).
8482tf.matrix_set_diag(input, diagonal, k = 1)
8483  ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
8484        [7, 7, 2, 7],
8485        [7, 7, 7, 3]],
8486       [[7, 4, 7, 7],
8487        [7, 7, 5, 7],
8488        [7, 7, 7, 6]]]
8489
8490# A band of diagonals.
8491diagonals = np.array([[[0, 9, 1],  # Diagonal shape: (2, 4, 3)
8492                       [6, 5, 8],
8493                       [1, 2, 3],
8494                       [4, 5, 0]],
8495                      [[0, 1, 2],
8496                       [5, 6, 4],
8497                       [6, 1, 2],
8498                       [3, 4, 0]]])
8499tf.matrix_set_diag(input, diagonals, k = (-1, 2))
8500  ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
8501        [4, 2, 5, 1],
8502        [7, 5, 3, 8]],
8503       [[6, 5, 1, 7],
8504        [3, 1, 6, 2],
8505        [7, 4, 2, 4]]]
8506
8507# LEFT_RIGHT alignment.
8508diagonals = np.array([[[9, 1, 0],  # Diagonal shape: (2, 4, 3)
8509                       [6, 5, 8],
8510                       [1, 2, 3],
8511                       [0, 4, 5]],
8512                      [[1, 2, 0],
8513                       [5, 6, 4],
8514                       [6, 1, 2],
8515                       [0, 3, 4]]])
8516tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
8517  ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
8518        [4, 2, 5, 1],
8519        [7, 5, 3, 8]],
8520       [[6, 5, 1, 7],
8521        [3, 1, 6, 2],
8522        [7, 4, 2, 4]]]
8523
8524```
8525  }];
8526
8527  let arguments = (ins
8528    Arg<TF_Tensor, [{Rank `r+1`, where `r >= 1`.}]>:$input,
8529    Arg<TF_Tensor, [{Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
8530`r >= 1`.}]>:$diagonal,
8531    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
8532diagonal, and negative value means subdiagonals. `k` can be a single integer
8533(for a single diagonal) or a pair of integers specifying the low and high ends
8534of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
8535
8536    DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
8537  );
8538
8539  let results = (outs
8540    Res<TF_Tensor, [{Rank `r+1`, with `output.shape = input.shape`.}]>:$output
8541  );
8542
8543  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8544}
8545
8546def TF_MatrixSolveOp : TF_Op<"MatrixSolve", [NoSideEffect]> {
8547  let summary = "Solves systems of linear equations.";
8548
8549  let description = [{
8550`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
8551form square matrices. `rhs` is a tensor of shape `[..., M, K]`. The `output` is
8552a tensor of shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
8553satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
8554If `adjoint` is `True` then each output matrix satisfies
8555`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
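
For example, via the Python binding `tf.linalg.solve` (a minimal sketch):

```python
import tensorflow as tf

matrix = tf.constant([[2., 0.], [0., 4.]])
rhs = tf.constant([[2.], [8.]])
tf.linalg.solve(matrix, rhs)  # [[1.], [2.]] since 2*1 == 2 and 4*2 == 8
```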
8556  }];
8557
8558  let arguments = (ins
8559    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$matrix,
8560    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$rhs,
8561
8562    DefaultValuedAttr<BoolAttr, "false">:$adjoint
8563  );
8564
8565  let results = (outs
8566    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$output
8567  );
8568
8569  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8570}
8571
8572def TF_MatrixTriangularSolveOp : TF_Op<"MatrixTriangularSolve", [NoSideEffect]> {
8573  let summary = [{
8574Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.
8575  }];
8576
8577  let description = [{
8578`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
8579square matrices. If `lower` is `True` then the strictly upper triangular part
8580of each inner-most matrix is assumed to be zero and not accessed.
8581If `lower` is False then the strictly lower triangular part of each inner-most
8582matrix is assumed to be zero and not accessed.
8583`rhs` is a tensor of shape `[..., M, N]`.
8584
8585The output is a tensor of shape `[..., M, N]`. If `adjoint` is
8586`False` then the innermost matrices in `output` satisfy matrix equations
8587`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
8588If `adjoint` is `True` then the innermost matrices in
8589`output` satisfy matrix equations
8590`adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
8591
8592Note: the batch shapes of the inputs only need to broadcast.
8593
8594Example:
8595```python
8596
8597a = tf.constant([[3,  0,  0,  0],
8598                 [2,  1,  0,  0],
8599                 [1,  0,  1,  0],
8600                 [1,  1,  1,  1]], dtype=tf.float32)
8601
8602b = tf.constant([[4],
8603                 [2],
8604                 [4],
8605                 [2]], dtype=tf.float32)
8606
8607x = tf.linalg.triangular_solve(a, b, lower=True)
8608x
8609# <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
8610# array([[ 1.3333334 ],
8611#        [-0.66666675],
8612#        [ 2.6666665 ],
8613#        [-1.3333331 ]], dtype=float32)>
8614
8615# in python3 one can use `a@x`
8616tf.matmul(a, x)
8617# <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
8618# array([[4.       ],
8619#        [2.       ],
8620#        [4.       ],
8621#        [1.9999999]], dtype=float32)>
8622```
8623  }];
8624
8625  let arguments = (ins
8626    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$matrix,
8627    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$rhs,
8628
8629    DefaultValuedAttr<BoolAttr, "true">:$lower,
8630    DefaultValuedAttr<BoolAttr, "false">:$adjoint
8631  );
8632
8633  let results = (outs
8634    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$output
8635  );
8636
8637  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8638}
8639
8640def TF_MaxOp : TF_Op<"Max", [NoSideEffect]> {
8641  let summary = [{
8642Computes the maximum of elements across dimensions of a tensor.
8643  }];
8644
8645  let description = [{
8646Reduces `input` along the dimensions given in `axis`. Unless
8647`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
8648`axis`. If `keep_dims` is true, the reduced dimensions are
8649retained with length 1.
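
For example, via the Python binding `tf.reduce_max` (a minimal sketch):

```python
import tensorflow as tf

x = tf.constant([[1, 3], [2, 4]])
tf.reduce_max(x, axis=1)                 # [3, 4]
tf.reduce_max(x, axis=1, keepdims=True)  # [[3], [4]]
```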
8650  }];
8651
8652  let arguments = (ins
8653    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
8654    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
8655`[-rank(input), rank(input))`.}]>:$reduction_indices,
8656
8657    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
8658  );
8659
8660  let results = (outs
8661    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
8662  );
8663
8664  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8665  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
8666
8667  let builders = [
8668    OpBuilder<(ins "Value":$input, "Value":$reduction_indices,
8669      "BoolAttr":$keep_dims)>
8670  ];
8671}
8672
8673def TF_MaxPoolOp : TF_Op<"MaxPool", [NoSideEffect, TF_FoldOperandsTransposeInterface, TF_LayoutSensitiveInterface]> {
8674  let summary = "Performs max pooling on the input.";
8675
8676  let arguments = (ins
8677    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{4-D input to pool over.}]>:$input,
8678
8679    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
8680    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
8681    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
8682    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
8683    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
8684  );
8685
8686  let results = (outs
8687    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{The max pooled output tensor.}]>:$output
8688  );
8689
8690  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8691
8692  let extraClassDeclaration = [{
8693    // TF_FoldOperandsTransposeInterface:
8694    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
8695    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
8696    LogicalResult FoldOperandsPermutation(ArrayRef<int64_t> permutation);
8697    // TF_LayoutSensitiveInterface:
8698    StringRef GetOptimalLayout(const RuntimeDevices& devices);
8699    LogicalResult UpdateDataFormat(StringRef data_format);
8700  }];
8701}
8702
8703def TF_MaxPool3DOp : TF_Op<"MaxPool3D", [NoSideEffect]> {
8704  let summary = "Performs 3D max pooling on the input.";
8705
8706  let arguments = (ins
8707    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{Shape `[batch, depth, rows, cols, channels]` tensor to pool over.}]>:$input,
8708
8709    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
8710    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
8711    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8712    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
8713  );
8714
8715  let results = (outs
8716    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The max pooled output tensor.}]>:$output
8717  );
8718
8719  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8720}
8721
8722def TF_MaxPool3DGradOp : TF_Op<"MaxPool3DGrad", [NoSideEffect]> {
8723  let summary = "Computes gradients of 3D max pooling function.";
8724
8725  let arguments = (ins
8726    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The original input tensor.}]>:$orig_input,
8727    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The original output tensor.}]>:$orig_output,
8728    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,
8729
8730    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
8731    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
8732    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8733    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
8734  );
8735
8736  let results = (outs
8737    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$output
8738  );
8739
8740  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
8741  TF_DerivedOperandTypeAttr TInput = TF_DerivedOperandTypeAttr<0>;
8742}
8743
8744def TF_MaxPool3DGradGradOp : TF_Op<"MaxPool3DGradGrad", [NoSideEffect]> {
8745  let summary = "Computes second-order gradients of the maxpooling function.";
8746
8747  let arguments = (ins
8748    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8749    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8750    Arg<TF_IntOrFpTensor, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,
8751
8752    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
8753    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
8754    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8755    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
8756  );
8757
8758  let results = (outs
8759    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
8760  );
8761
8762  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8763}
8764
8765def TF_MaxPoolGradOp : TF_Op<"MaxPoolGrad", [NoSideEffect]> {
8766  let summary = "Computes gradients of the maxpooling function.";
8767
8768  let arguments = (ins
8769    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8770    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8771    Arg<TF_IntOrFpTensor, [{4-D.  Gradients w.r.t. the output of `max_pool`.}]>:$grad,
8772
8773    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
8774    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
8775    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
8776    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
8777    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8778  );
8779
8780  let results = (outs
8781    Res<TF_IntOrFpTensor, [{Gradients w.r.t. the input to `max_pool`.}]>:$output
8782  );
8783
8784  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8785
8786  let verifier = [{
8787    return Verify(*this);
8788  }];
8789}
8790
8791def TF_MaxPoolGradGradOp : TF_Op<"MaxPoolGradGrad", [NoSideEffect]> {
8792  let summary = "Computes second-order gradients of the maxpooling function.";
8793
8794  let arguments = (ins
8795    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8796    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8797    Arg<TF_IntOrFpTensor, [{4-D.  Gradients of gradients w.r.t. the input of `max_pool`.}]>:$grad,
8798
8799    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
8800    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
8801    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8802    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8803  );
8804
8805  let results = (outs
8806    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
8807  );
8808
8809  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8810}
8811
8812def TF_MaxPoolGradGradV2Op : TF_Op<"MaxPoolGradGradV2", [NoSideEffect]> {
8813  let summary = "Computes second-order gradients of the maxpooling function.";
8814
8815  let arguments = (ins
8816    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8817    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8818    Arg<TF_IntOrFpTensor, [{4-D.  Gradients of gradients w.r.t. the input of `max_pool`.}]>:$grad,
8819    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
8820    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
8821input tensor.}]>:$strides,
8822
8823    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8824    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8825  );
8826
8827  let results = (outs
8828    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
8829  );
8830
8831  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8832}
8833
8834def TF_MaxPoolGradV2Op : TF_Op<"MaxPoolGradV2", [NoSideEffect]> {
8835  let summary = "Computes gradients of the maxpooling function.";
8836
8837  let arguments = (ins
8838    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8839    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8840    Arg<TF_IntOrFpTensor, [{4-D.  Gradients w.r.t. the output of `max_pool`.}]>:$grad,
8841    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
8842    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
8843input tensor.}]>:$strides,
8844
8845    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8846    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8847  );
8848
8849  let results = (outs
8850    Res<TF_IntOrFpTensor, [{Gradients w.r.t. the input to `max_pool`.}]>:$output
8851  );
8852
8853  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8854}
8855
8856def TF_MaxPoolV2Op : TF_Op<"MaxPoolV2", [NoSideEffect]> {
8857  let summary = "Performs max pooling on the input.";
8858
8859  let arguments = (ins
8860    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{4-D input to pool over.}]>:$input,
8861    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
8862    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
8863input tensor.}]>:$strides,
8864
8865    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8866    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
8867  );
8868
8869  let results = (outs
8870    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{The max pooled output tensor.}]>:$output
8871  );
8872
8873  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8874}
8875
8876def TF_MaximumOp : TF_Op<"Maximum", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
8877                   WithBroadcastableBinOpBuilder {
8878  let summary = "Returns the max of x and y (i.e. x > y ? x : y) element-wise.";
8879
8880  let description = [{
8881*NOTE*: `Maximum` supports broadcasting. More about broadcasting
8882[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
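
For example, via the Python binding `tf.math.maximum` (a minimal sketch):

```python
import tensorflow as tf

tf.math.maximum([1., 4.], [3., 2.])  # [3., 4.]
# Broadcasting: the scalar is compared against every element.
tf.math.maximum([1., 4.], 2.)        # [2., 4.]
```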
8883  }];
8884
8885  let arguments = (ins
8886    TF_IntOrFpTensor:$x,
8887    TF_IntOrFpTensor:$y
8888  );
8889
8890  let results = (outs
8891    TF_IntOrFpTensor:$z
8892  );
8893
8894  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8895}
8896
8897def TF_MeanOp : TF_Op<"Mean", [NoSideEffect, TF_FoldOperandsTransposeInterface]> {
8898  let summary = "Computes the mean of elements across dimensions of a tensor.";
8899
8900  let description = [{
8901Reduces `input` along the dimensions given in `axis`. Unless
8902`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
8903`axis`. If `keep_dims` is true, the reduced dimensions are
8904retained with length 1.
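
For example, via the Python binding `tf.reduce_mean` (a minimal sketch):

```python
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_mean(x)          # 2.5
tf.reduce_mean(x, axis=0)  # [2., 3.]
```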
8905  }];
8906
8907  let arguments = (ins
8908    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
8909    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
8910`[-rank(input), rank(input))`.}]>:$reduction_indices,
8911
8912    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
8913  );
8914
8915  let results = (outs
8916    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
8917  );
8918
8919  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8920  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
8921
8922  let extraClassDeclaration = [{
8923    // TF_FoldOperandsTransposeInterface:
8924    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
8925    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {}; }
8926    LogicalResult FoldOperandsPermutation(ArrayRef<int64_t> permutation);
8927  }];
8928}
8929
8930def TF_MergeSummaryOp : TF_Op<"MergeSummary", [NoSideEffect, SameOperandsAndResultType]> {
8931  let summary = "Merges summaries.";
8932
8933  let description = [{
8934This op creates a
8935[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
8936protocol buffer that contains the union of all the values in the input
8937summaries.
8938
8939When the Op is run, it reports an `InvalidArgument` error if multiple values
8940in the summaries to merge use the same tag.
8941  }];
8942
8943  let arguments = (ins
8944    Arg<Variadic<TF_StrTensor>, [{Can be of any shape.  Each must contain serialized `Summary` protocol
8945buffers.}]>:$inputs
8946  );
8947
8948  let results = (outs
8949    Res<TF_StrTensor, [{Scalar. Serialized `Summary` protocol buffer.}]>:$summary
8950  );
8951
8952  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
8953}
8954
8955def TF_MergeV2CheckpointsOp : TF_Op<"MergeV2Checkpoints", []> {
8956  let summary = [{
V2 format specific: merges the metadata files of sharded checkpoints.
8958  }];
8959
8960  let description = [{
The result is one logical checkpoint, with one physical metadata file and
renamed data files.
8963
8964Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
8965
If `delete_old_dirs` is true, attempts to recursively delete the dirname of
each path in the input `checkpoint_prefixes`. This is useful when those paths
are non-user-facing temporary locations.
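
As an illustration only, a sketch via `tf.raw_ops.MergeV2Checkpoints`; the
shard prefixes and paths below are hypothetical:

```python
import tensorflow as tf

# Merge two (hypothetical) shard prefixes into one logical checkpoint.
tf.raw_ops.MergeV2Checkpoints(
    checkpoint_prefixes=tf.constant(["/tmp/ckpt_tmp/part-0",
                                     "/tmp/ckpt_tmp/part-1"]),
    destination_prefix=tf.constant("/tmp/ckpt/final"),
    delete_old_dirs=True)
```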
8969  }];
8970
8971  let arguments = (ins
8972    Arg<TF_StrTensor, [{prefixes of V2 checkpoints to merge.}]>:$checkpoint_prefixes,
8973    Arg<TF_StrTensor, [{scalar.  The desired final prefix.  Allowed to be the same
8974as one of the checkpoint_prefixes.}]>:$destination_prefix,
8975
8976    DefaultValuedAttr<BoolAttr, "true">:$delete_old_dirs
8977  );
8978
8979  let results = (outs);
8980}
8981
8982def TF_MinOp : TF_Op<"Min", [NoSideEffect]> {
8983  let summary = [{
8984Computes the minimum of elements across dimensions of a tensor.
8985  }];
8986
8987  let description = [{
8988Reduces `input` along the dimensions given in `axis`. Unless
8989`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
8990`axis`. If `keep_dims` is true, the reduced dimensions are
8991retained with length 1.
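
For example, a minimal sketch using the Python wrapper `tf.math.reduce_min`,
which lowers to this op:

```python
import tensorflow as tf

x = tf.constant([[3., 1.], [2., 4.]])
tf.math.reduce_min(x)          # ==> 1.0
tf.math.reduce_min(x, axis=1)  # ==> [1., 2.]
```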
8992  }];
8993
8994  let arguments = (ins
8995    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
8996    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
8997`[-rank(input), rank(input))`.}]>:$reduction_indices,
8998
8999    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
9000  );
9001
9002  let results = (outs
9003    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
9004  );
9005
9006  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9007  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
9008}
9009
9010def TF_MinimumOp : TF_Op<"Minimum", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
9011                   WithBroadcastableBinOpBuilder {
9012  let summary = "Returns the min of x and y (i.e. x < y ? x : y) element-wise.";
9013
9014  let description = [{
9015*NOTE*: `Minimum` supports broadcasting. More about broadcasting
9016[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
9017  }];
9018
9019  let arguments = (ins
9020    TF_IntOrFpTensor:$x,
9021    TF_IntOrFpTensor:$y
9022  );
9023
9024  let results = (outs
9025    TF_IntOrFpTensor:$z
9026  );
9027
9028  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9029}
9030
9031def TF_MirrorPadOp : TF_Op<"MirrorPad", [NoSideEffect, TF_OperandHasRank<1, 2>]> {
9032  let summary = "Pads a tensor with mirrored values.";
9033
9034  let description = [{
This operation pads `input` with mirrored values according to the `paddings`
you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
how many values to add before the contents of `input` in that dimension, and
`paddings[D, 1]` indicates how many values to add after the contents of `input`
in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
than `input.dim_size(D)` if `copy_border` is true (SYMMETRIC mode), or no
greater than `input.dim_size(D) - 1` if it is false (REFLECT mode).
9043
9044The padded size of each dimension D of the output is:
9045
9046`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
9047
9048For example:
9049
9050```
9051# 't' is [[1, 2, 3], [4, 5, 6]].
# 'paddings' is [[1, 1], [2, 2]].
9053# 'mode' is SYMMETRIC.
9054# rank of 't' is 2.
9055pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
9056                      [2, 1, 1, 2, 3, 3, 2]
9057                      [5, 4, 4, 5, 6, 6, 5]
9058                      [5, 4, 4, 5, 6, 6, 5]]
9059```
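
At the Python level the same computation is reachable through `tf.pad` with a
`mode` argument; a minimal sketch reproducing the example above:

```python
import tensorflow as tf

t = tf.constant([[1, 2, 3], [4, 5, 6]])
paddings = tf.constant([[1, 1], [2, 2]])
tf.pad(t, paddings, mode="SYMMETRIC")  # ==> the 4x7 result shown above
```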
9060  }];
9061
9062  let arguments = (ins
9063    Arg<TF_Tensor, [{The input tensor to be padded.}]>:$input,
9064    Arg<TF_I32OrI64Tensor, [{A two-column matrix specifying the padding sizes. The number of
9065rows must be the same as the rank of `input`.}]>:$paddings,
9066
9067    TF_AnyStrAttrOf<["REFLECT", "SYMMETRIC"]>:$mode
9068  );
9069
9070  let results = (outs
9071    Res<TF_Tensor, [{The padded tensor.}]>:$output
9072  );
9073
9074  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9075  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
9076}
9077
9078def TF_MirrorPadGradOp : TF_Op<"MirrorPadGrad", [NoSideEffect, TF_OperandHasRank<1, 2>]> {
9079  let summary = [{
9080Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
9081  }];
9082
9083  let description = [{
9084This operation folds the padded areas of `input` by `MirrorPad` according to the
9085`paddings` you specify. `paddings` must be the same as `paddings` argument
9086given to the corresponding `MirrorPad` op.
9087
9088The folded size of each dimension D of the output is:
9089
9090`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
9091
9092For example:
9093
9094```
9095# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
# 'paddings' is [[0, 1], [0, 1]].
9097# 'mode' is SYMMETRIC.
9098# rank of 't' is 2.
9099pad(t, paddings) ==> [[ 1,  5]
9100                      [11, 28]]
9101```
9102  }];
9103
9104  let arguments = (ins
9105    Arg<TF_Tensor, [{The input tensor to be folded.}]>:$input,
9106    Arg<TF_I32OrI64Tensor, [{A two-column matrix specifying the padding sizes. The number of
9107rows must be the same as the rank of `input`.}]>:$paddings,
9108
9109    TF_AnyStrAttrOf<["REFLECT", "SYMMETRIC"]>:$mode
9110  );
9111
9112  let results = (outs
9113    Res<TF_Tensor, [{The folded tensor.}]>:$output
9114  );
9115
9116  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9117  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
9118}
9119
9120def TF_MlirLocalVarOp : TF_Op<"MlirLocalVarOp", []> {
9121  let summary = "Creates a handle to an in-scope variable.";
9122
9123  let description = [{
Used by internal passes for the temporary representation of local state, which
will eventually be removed.
9126  }];
9127
9128  let arguments = (ins);
9129
9130  let results = (outs
9131    Res<TF_ResourceTensor, "", [TF_VariableAlloc]>:$resource
9132  );
9133}
9134
9135def TF_MlirPassthroughOp : TF_Op<"MlirPassthroughOp", [NoSideEffect]> {
9136  let summary = [{
9137Wraps an arbitrary MLIR computation expressed as a module with a main() function.
9138  }];
9139
9140  let description = [{
This operation does not have an associated kernel and is not intended to be
executed in a regular TensorFlow session. Instead, it is intended for testing
or for the special case where a user wants to pass a custom MLIR computation
through a TensorFlow graph, with the intent of having custom tooling process
it downstream (when targeting a different environment, like TensorFlow Lite,
for example).
The MLIR module is expected to have a main() function that will be used as the
entry point. The inputs to the operation will be passed as arguments to the
main() function, and the returned values of the main() function will be mapped
to the outputs.
9151Example usage:
9152
9153```
9154import tensorflow as tf
9155from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
9156
mlir_module = '''
func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
   %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
   return %add : tensor<10x10xf32>
}
'''
9163
9164@tf.function
9165def foo(x, y):
9166  return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
9167
9168graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
9169```
9170  }];
9171
9172  let arguments = (ins
9173    Variadic<TF_Tensor>:$inputs,
9174
9175    StrAttr:$mlir_module
9176  );
9177
9178  let results = (outs
9179    Variadic<TF_Tensor>:$outputs
9180  );
9181
9182  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
9183  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
9184}
9185
9186def TF_ModOp : TF_Op<"Mod", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
9187               WithBroadcastableBinOpBuilder {
9188  let summary = [{
Returns the element-wise remainder of division.
9190  }];
9191
9192  let description = [{
This emulates C semantics in that the result is consistent with a truncating
divide. E.g. `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
9195
9196*NOTE*: `Mod` supports broadcasting. More about broadcasting
9197[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
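
For example, a sketch via `tf.raw_ops.Mod` illustrating the truncated,
C-style sign behavior:

```python
import tensorflow as tf

x = tf.constant([7, -7])
y = tf.constant([3, 3])
tf.raw_ops.Mod(x=x, y=y)  # ==> [1, -1] (the sign follows the dividend)
```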
9198  }];
9199
9200  let arguments = (ins
9201    TF_FpOrI32OrI64Tensor:$x,
9202    TF_FpOrI32OrI64Tensor:$y
9203  );
9204
9205  let results = (outs
9206    TF_FpOrI32OrI64Tensor:$z
9207  );
9208
9209  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9210}
9211
9212def TF_ModelDatasetOp : TF_Op<"ModelDataset", [NoSideEffect]> {
9213  let summary = "Identity transformation that models performance.";
9214
9215  let description = [{
9216Identity transformation that models performance.
9217  }];
9218
9219  let arguments = (ins
9220    Arg<TF_VariantTensor, [{A variant tensor representing the input dataset.}]>:$input_dataset,
9221
9222    DefaultValuedAttr<I64Attr, "0">:$algorithm,
9223    DefaultValuedAttr<I64Attr, "0">:$cpu_budget,
9224    DefaultValuedAttr<I64Attr, "0">:$ram_budget,
9225    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
9226    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
9227  );
9228
9229  let results = (outs
9230    TF_VariantTensor:$handle
9231  );
9232}
9233
9234def TF_MulOp : TF_Op<"Mul", [Commutative, NoSideEffect, ResultsBroadcastableShape, TF_CwiseBinary, TF_SameOperandsAndResultElementTypeResolveRef]>,
9235               WithBroadcastableBinOpBuilder {
9236  let summary = "Returns x * y element-wise.";
9237
9238  let description = [{
9239*NOTE*: `Multiply` supports broadcasting. More about broadcasting
9240[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
9241  }];
9242
9243  let arguments = (ins
9244    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
9245    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
9246  );
9247
9248  let results = (outs
9249    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
9250  );
9251
9252  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9253
9254  let hasFolder = 1;
9255}
9256
9257def TF_MulNoNanOp : TF_Op<"MulNoNan", [NoSideEffect, ResultsBroadcastableShape]>,
9258                    WithBroadcastableBinOpBuilder {
9259  let summary = [{
Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
9261  }];
9262
9263  let description = [{
9264*NOTE*: `MulNoNan` supports broadcasting. More about broadcasting
9265[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
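
For example, a minimal sketch using the Python wrapper
`tf.math.multiply_no_nan`, which lowers to this op:

```python
import tensorflow as tf

x = tf.constant([2., float("inf"), float("nan")])
y = tf.constant([3., 0., 0.])
tf.math.multiply_no_nan(x, y)  # ==> [6., 0., 0.]
```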
9266  }];
9267
9268  let arguments = (ins
9269    TF_FpOrComplexTensor:$x,
9270    TF_FpOrComplexTensor:$y
9271  );
9272
9273  let results = (outs
9274    TF_FpOrComplexTensor:$z
9275  );
9276
9277  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9278
9279  let hasCanonicalizer = 1;
9280}
9281
9282def TF_MultiDeviceIteratorOp : TF_Op<"MultiDeviceIterator", []> {
9283  let summary = "Creates a MultiDeviceIterator resource.";
9284
9285  let arguments = (ins
9286    Confined<StrArrayAttr, [ArrayMinCount<1>]>:$devices,
9287    StrAttr:$shared_name,
9288    StrAttr:$container,
9289    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
9290    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
9291  );
9292
9293  let results = (outs
9294    Res<TF_ResourceTensor, [{Handle to the resource created.}], [TF_DatasetIteratorAlloc]>:$handle
9295  );
9296}
9297
9298def TF_MultiDeviceIteratorFromStringHandleOp : TF_Op<"MultiDeviceIteratorFromStringHandle", []> {
9299  let summary = [{
9300Generates a MultiDeviceIterator resource from its provided string handle.
9301  }];
9302
9303  let arguments = (ins
9304    Arg<TF_StrTensor, [{String representing the resource.}]>:$string_handle,
9305
9306    DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
9307    DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
9308  );
9309
9310  let results = (outs
9311    Res<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorAlloc]>:$multi_device_iterator
9312  );
9313}
9314
9315def TF_MultiDeviceIteratorGetNextFromShardOp : TF_Op<"MultiDeviceIteratorGetNextFromShard", []> {
9316  let summary = "Gets next element for the provided shard number.";
9317
9318  let arguments = (ins
9319    Arg<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$multi_device_iterator,
9320    Arg<TF_Int32Tensor, [{Integer representing which shard to fetch data for.}]>:$shard_num,
9321    Arg<TF_Int64Tensor, [{Which incarnation of the MultiDeviceIterator is running.}]>:$incarnation_id
9322  );
9323
9324  let results = (outs
9325    Res<Variadic<TF_Tensor>, [{Result of the get_next on the dataset.}]>:$components
9326  );
9327
9328  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
9329  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
9330}
9331
9332def TF_MultiDeviceIteratorInitOp : TF_Op<"MultiDeviceIteratorInit", []> {
9333  let summary = "Initializes the multi device iterator with the given dataset.";
9334
9335  let arguments = (ins
9336    Arg<TF_VariantTensor, [{Dataset to be iterated upon.}]>:$dataset,
9337    Arg<TF_ResourceTensor, [{A MultiDeviceIteratorResource.}], [TF_DatasetIteratorWrite]>:$multi_device_iterator,
9338    Arg<TF_Int64Tensor, [{The maximum size of the host side per device buffer to keep.}]>:$max_buffer_size
9339  );
9340
9341  let results = (outs
9342    Res<TF_Int64Tensor, [{An int64 indicating which incarnation of the MultiDeviceIterator
9343is running.}]>:$incarnation_id
9344  );
9345}
9346
9347def TF_MultiDeviceIteratorToStringHandleOp : TF_Op<"MultiDeviceIteratorToStringHandle", []> {
9348  let summary = "Produces a string handle for the given MultiDeviceIterator.";
9349
9350  let arguments = (ins
9351    Arg<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorRead]>:$multi_device_iterator
9352  );
9353
9354  let results = (outs
9355    Res<TF_StrTensor, [{A string representing the resource.}]>:$string_handle
9356  );
9357}
9358
9359def TF_MultinomialOp : TF_Op<"Multinomial", [TF_CannotDuplicate]> {
9360  let summary = "Draws samples from a multinomial distribution.";
9361
9362  let arguments = (ins
9363    Arg<TF_IntOrFpTensor, [{2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
9364represents the unnormalized log probabilities for all classes.}]>:$logits,
9365    Arg<TF_Int32Tensor, [{0-D.  Number of independent samples to draw for each row slice.}]>:$num_samples,
9366
9367    DefaultValuedAttr<I64Attr, "0">:$seed,
9368    DefaultValuedAttr<I64Attr, "0">:$seed2
9369  );
9370
9371  let results = (outs
9372    Res<TF_I32OrI64Tensor, [{2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
9373contains the drawn class labels with range `[0, num_classes)`.}]>:$output
9374  );
9375
9376  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9377  TF_DerivedResultTypeAttr output_dtype = TF_DerivedResultTypeAttr<0>;
9378}
9379
9380def TF_MutableDenseHashTableV2Op : TF_Op<"MutableDenseHashTableV2", []> {
9381  let summary = [{
9382Creates an empty hash table that uses tensors as the backing store.
9383  }];
9384
9385  let description = [{
9386It uses "open addressing" with quadratic reprobing to resolve
9387collisions.
9388
9389This op creates a mutable hash table, specifying the type of its keys and
9390values. Each value must be a scalar. Data can be inserted into the table using
9391the insert operations. It does not support the initialization operation.
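
For example, a minimal sketch using the Python-level wrapper
`tf.lookup.experimental.DenseHashTable`, which is backed by this op:

```python
import tensorflow as tf

table = tf.lookup.experimental.DenseHashTable(
    key_dtype=tf.int64, value_dtype=tf.int64, default_value=-1,
    empty_key=0, deleted_key=-1)  # 0 and -1 must then never be used as keys
table.insert(tf.constant([1, 2], tf.int64), tf.constant([10, 20], tf.int64))
table.lookup(tf.constant([1, 3], tf.int64))  # ==> [10, -1]
```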
9392  }];
9393
9394  let arguments = (ins
9395    Arg<TF_Tensor, [{The key used to represent empty key buckets internally. Must not
9396be used in insert or lookup operations.}]>:$empty_key,
9397    TF_Tensor:$deleted_key,
9398
9399    DefaultValuedAttr<StrAttr, "">:$container,
9400    DefaultValuedAttr<StrAttr, "">:$shared_name,
9401    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
9402    TypeAttr:$value_dtype,
9403    DefaultValuedAttr<TF_ShapeAttr, "llvm::ArrayRef<int64_t>({})">:$value_shape,
9404    DefaultValuedAttr<I64Attr, "131072">:$initial_num_buckets,
9405    DefaultValuedAttr<F32Attr, "0.8f">:$max_load_factor
9406  );
9407
9408  let results = (outs
9409    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
9410  );
9411
9412  TF_DerivedOperandTypeAttr key_dtype = TF_DerivedOperandTypeAttr<0>;
9413}
9414
9415def TF_MutableHashTableOfTensorsV2Op : TF_Op<"MutableHashTableOfTensorsV2", []> {
9416  let summary = "Creates an empty hash table.";
9417
9418  let description = [{
9419This op creates a mutable hash table, specifying the type of its keys and
9420values. Each value must be a vector. Data can be inserted into the table using
9421the insert operations. It does not support the initialization operation.
9422  }];
9423
9424  let arguments = (ins
9425    DefaultValuedAttr<StrAttr, "">:$container,
9426    DefaultValuedAttr<StrAttr, "">:$shared_name,
9427    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
9428    TypeAttr:$key_dtype,
9429    TypeAttr:$value_dtype,
9430    DefaultValuedAttr<TF_ShapeAttr, "llvm::ArrayRef<int64_t>({})">:$value_shape
9431  );
9432
9433  let results = (outs
9434    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
9435  );
9436}
9437
9438def TF_MutableHashTableV2Op : TF_Op<"MutableHashTableV2", []> {
9439  let summary = "Creates an empty hash table.";
9440
9441  let description = [{
9442This op creates a mutable hash table, specifying the type of its keys and
9443values. Each value must be a scalar. Data can be inserted into the table using
9444the insert operations. It does not support the initialization operation.
9445  }];
9446
9447  let arguments = (ins
9448    DefaultValuedAttr<StrAttr, "">:$container,
9449    DefaultValuedAttr<StrAttr, "">:$shared_name,
9450    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
9451    TypeAttr:$key_dtype,
9452    TypeAttr:$value_dtype
9453  );
9454
9455  let results = (outs
9456    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
9457  );
9458}
9459
9460def TF_NdtriOp : TF_Op<"Ndtri", [NoSideEffect]> {
9461  let summary = "";
9462
9463  let arguments = (ins
9464    TF_FloatTensor:$x
9465  );
9466
9467  let results = (outs
9468    TF_FloatTensor:$y
9469  );
9470
9471  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9472}
9473
9474def TF_NegOp : TF_Op<"Neg", [Involution, NoSideEffect, SameOperandsAndResultType, TF_CwiseUnary]> {
9475  let summary = "Computes numerical negative value element-wise.";
9476
9477  let description = [{
9478I.e., \\(y = -x\\).
9479  }];
9480
9481  let arguments = (ins
9482    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
9483  );
9484
9485  let results = (outs
9486    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
9487  );
9488
9489  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9490}
9491
9492def TF_NextAfterOp : TF_Op<"NextAfter", [NoSideEffect, ResultsBroadcastableShape]>,
9493                     WithBroadcastableBinOpBuilder {
9494  let summary = [{
9495Returns the next representable value of `x1` in the direction of `x2`, element-wise.
9496  }];
9497
9498  let description = [{
9499This operation returns the same result as the C++ std::nextafter function.
9500
9501It can also return a subnormal number.
9502
9503@compatibility(cpp)
9504Equivalent to C++ std::nextafter function.
9505@end_compatibility
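
For example, a minimal sketch using the Python wrapper `tf.math.nextafter`
(float32 by default):

```python
import tensorflow as tf

tf.math.nextafter(tf.constant(1.0), tf.constant(2.0))  # ==> 1.0000001
tf.math.nextafter(tf.constant(1.0), tf.constant(0.0))  # ==> 0.99999994
```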
9506  }];
9507
9508  let arguments = (ins
9509    TF_F32OrF64Tensor:$x1,
9510    TF_F32OrF64Tensor:$x2
9511  );
9512
9513  let results = (outs
9514    TF_F32OrF64Tensor:$output
9515  );
9516
9517  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9518}
9519
9520def TF_NoOp : TF_Op<"NoOp", [NoSideEffect]> {
9521  let summary = "Does nothing. Only useful as a placeholder for control edges.";
9522
9523  let arguments = (ins);
9524
9525  let results = (outs);
9526}
9527
9528def TF_NonMaxSuppressionV3Op : TF_Op<"NonMaxSuppressionV3", [NoSideEffect]> {
9529  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score.
9531  }];
9532
9533  let description = [{
Boxes that have high intersection-over-union (IOU) overlap with previously
selected boxes are pruned away.  Bounding boxes with score less than
`score_threshold` are removed.  Bounding boxes are supplied as
[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
diagonal pair of box corners, and the coordinates can be provided as normalized
(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
is agnostic to where the origin is in the coordinate system and, more
generally, is invariant to orthogonal transformations and translations
of the coordinate system; thus translations or reflections of the coordinate
system result in the same boxes being selected by the algorithm.
The output of this operation is a set of integers indexing into the input
collection of bounding boxes representing the selected boxes.  The bounding
box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:
  selected_indices = tf.image.non_max_suppression(
      boxes, scores, max_output_size, iou_threshold, score_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
9551  }];
9552
9553  let arguments = (ins
9554    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
9555    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
9556score corresponding to each box (each row of boxes).}]>:$scores,
9557    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
9558boxes to be selected by non max suppression.}]>:$max_output_size,
9559    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
9560boxes overlap too much with respect to IOU.}]>:$iou_threshold,
9561    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
9562boxes based on score.}]>:$score_threshold
9563  );
9564
9565  let results = (outs
9566    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
9567indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices
9568  );
9569
9570  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9571  TF_DerivedOperandTypeAttr T_threshold = TF_DerivedOperandTypeAttr<3>;
9572
9573  let hasCanonicalizer = 1;
9574}
9575
9576def TF_NonMaxSuppressionV4Op : TF_Op<"NonMaxSuppressionV4", [NoSideEffect]> {
9577  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score.
9579  }];
9580
9581  let description = [{
Boxes that have high intersection-over-union (IOU) overlap with previously
selected boxes are pruned away.  Bounding boxes with score less than
`score_threshold` are removed.  Bounding boxes are supplied as
[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
diagonal pair of box corners, and the coordinates can be provided as normalized
(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
is agnostic to where the origin is in the coordinate system and, more
generally, is invariant to orthogonal transformations and translations
of the coordinate system; thus translations or reflections of the coordinate
system result in the same boxes being selected by the algorithm.
The output of this operation is a set of integers indexing into the input
collection of bounding boxes representing the selected boxes.  The bounding
box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:
  selected_indices = tf.image.non_max_suppression(
      boxes, scores, max_output_size, iou_threshold, score_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
9599  }];
9600
9601  let arguments = (ins
9602    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
9603    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
9604score corresponding to each box (each row of boxes).}]>:$scores,
9605    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
9606boxes to be selected by non max suppression.}]>:$max_output_size,
9607    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
9608boxes overlap too much with respect to IOU.}]>:$iou_threshold,
9609    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
9610boxes based on score.}]>:$score_threshold,
9611
9612    DefaultValuedAttr<BoolAttr, "false">:$pad_to_max_output_size
9613  );
9614
9615  let results = (outs
9616    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
9617indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices,
9618    Res<TF_Int32Tensor, [{A 0-D integer tensor representing the number of valid elements in
9619`selected_indices`, with the valid elements appearing first.}]>:$valid_outputs
9620  );
9621
9622  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9623  TF_DerivedOperandTypeAttr T_threshold = TF_DerivedOperandTypeAttr<3>;
9624}
9625
9626def TF_NonMaxSuppressionV5Op : TF_Op<"NonMaxSuppressionV5", [NoSideEffect]> {
9627  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score.
9629  }];
9630
9631  let description = [{
Boxes that have high intersection-over-union (IOU) overlap with previously
selected boxes are pruned away.  Bounding boxes with score less than
`score_threshold` are removed.  Bounding boxes are supplied as
[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
diagonal pair of box corners, and the coordinates can be provided as normalized
(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
is agnostic to where the origin is in the coordinate system and, more
generally, is invariant to orthogonal transformations and translations
of the coordinate system; thus translations or reflections of the coordinate
system result in the same boxes being selected by the algorithm.
The output of this operation is a set of integers indexing into the input
collection of bounding boxes representing the selected boxes.  The bounding
box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:
  selected_indices = tf.image.non_max_suppression(
      boxes, scores, max_output_size, iou_threshold, score_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
9649This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f.
9650Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
9651of other overlapping boxes instead of directly causing them to be pruned.
9652To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be
9653larger than 0.
9654  }];
9655
9656  let arguments = (ins
9657    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
9658    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
9659score corresponding to each box (each row of boxes).}]>:$scores,
9660    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
9661boxes to be selected by non max suppression.}]>:$max_output_size,
9662    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
9663boxes overlap too much with respect to IOU.}]>:$iou_threshold,
9664    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
9665boxes based on score.}]>:$score_threshold,
9666    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et
9667al (c.f. https://arxiv.org/abs/1704.04503).  When `soft_nms_sigma=0.0` (which
9668is default), we fall back to standard (hard) NMS.}]>:$soft_nms_sigma,
9669
9670    DefaultValuedAttr<BoolAttr, "false">:$pad_to_max_output_size
9671  );
9672
9673  let results = (outs
9674    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
9675indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices,
9676    Res<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[M]` representing the corresponding
9677scores for each selected box, where `M <= max_output_size`.  Scores only differ
9678from corresponding input scores when using Soft NMS (i.e. when
9679`soft_nms_sigma>0`)}]>:$selected_scores,
9680    Res<TF_Int32Tensor, [{A 0-D integer tensor representing the number of valid elements in
9681`selected_indices`, with the valid elements appearing first.}]>:$valid_outputs
9682  );
9683
9684  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9685}
9686
9687def TF_NotEqualOp : TF_Op<"NotEqual", [Commutative, NoSideEffect]> {
9688  let summary = "Returns the truth value of (x != y) element-wise.";
9689
9690  let description = [{
9691*NOTE*: `NotEqual` supports broadcasting. More about broadcasting
9692[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
9693  }];
9694
9695  let arguments = (ins
9696    TF_Tensor:$x,
9697    TF_Tensor:$y,
9698
9699    DefaultValuedAttr<BoolAttr, "true">:$incompatible_shape_error
9700  );
9701
9702  let results = (outs
9703    TF_BoolTensor:$z
9704  );
9705
9706  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9707
9708  let builders = [
9709    OpBuilder<(ins "Value":$x, "Value":$y,
9710      "BoolAttr":$incompatible_shape_error)>
9711  ];
9712
9713  let verifier = [{
9714    return Verify(*this);
9715  }];
9716
9717  let hasCanonicalizer = 1;
9718}
9719
9720def TF_OneHotOp : TF_Op<"OneHot", [NoSideEffect]> {
9721  let summary = "Returns a one-hot tensor.";
9722
9723  let description = [{
9724The locations represented by indices in `indices` take value `on_value`,
9725while all other locations take value `off_value`.
9726
If the input `indices` has rank `N`, the output will have rank `N+1`. The new
axis is created at dimension `axis` (by default, the new axis is appended at
the end).
9730
9731If `indices` is a scalar the output shape will be a vector of length `depth`.
9732
9733If `indices` is a vector of length `features`, the output shape will be:
9734```
9735  features x depth if axis == -1
9736  depth x features if axis == 0
9737```
9738
9739If `indices` is a matrix (batch) with shape `[batch, features]`,
9740the output shape will be:
9741```
9742  batch x features x depth if axis == -1
9743  batch x depth x features if axis == 1
9744  depth x batch x features if axis == 0
9745```
9746
9747
9748Examples
9749=========
9750
9751Suppose that
9752```
9753  indices = [0, 2, -1, 1]
9754  depth = 3
9755  on_value = 5.0
9756  off_value = 0.0
9757  axis = -1
9758```
9759
9760Then output is `[4 x 3]`:
9761```
9762output =
9763  [5.0 0.0 0.0]  // one_hot(0)
9764  [0.0 0.0 5.0]  // one_hot(2)
9765  [0.0 0.0 0.0]  // one_hot(-1)
9766  [0.0 5.0 0.0]  // one_hot(1)
9767```
9768
9769Suppose that
9770```
9771  indices = [0, 2, -1, 1]
9772  depth = 3
9773  on_value = 0.0
9774  off_value = 3.0
9775  axis = 0
9776```
9777
9778Then output is `[3 x 4]`:
9779```
9780output =
9781  [0.0 3.0 3.0 3.0]
9782  [3.0 3.0 3.0 0.0]
9784  [3.0 0.0 3.0 3.0]
9785//  ^                one_hot(0)
9786//      ^            one_hot(2)
9787//          ^        one_hot(-1)
9788//              ^    one_hot(1)
9789```
9790
9791Suppose that
9792```
9793  indices = [[0, 2], [1, -1]]
9794  depth = 3
9795  on_value = 1.0
9796  off_value = 0.0
9797  axis = -1
9798```
9799
9800Then output is `[2 x 2 x 3]`:
9801```
9802output =
9803  [
9804    [1.0, 0.0, 0.0]  // one_hot(0)
9805    [0.0, 0.0, 1.0]  // one_hot(2)
9806  ][
9807    [0.0, 1.0, 0.0]  // one_hot(1)
9808    [0.0, 0.0, 0.0]  // one_hot(-1)
9809  ]
9810```
9811  }];
9812
9813  let arguments = (ins
9814    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint8]>, [{A tensor of indices.}]>:$indices,
9815    Arg<TF_Int32Tensor, [{A scalar defining the depth of the one hot dimension.}]>:$depth,
9816    Arg<TF_Tensor, [{A scalar defining the value to fill in output when `indices[j] = i`.}]>:$on_value,
9817    Arg<TF_Tensor, [{A scalar defining the value to fill in output when `indices[j] != i`.}]>:$off_value,
9818
9819    DefaultValuedAttr<I64Attr, "-1">:$axis
9820  );
9821
9822  let results = (outs
9823    Res<TF_Tensor, [{The one-hot tensor.}]>:$output
9824  );
9825
9826  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
9827  TF_DerivedOperandTypeAttr TI = TF_DerivedOperandTypeAttr<0>;
9828
9829  let builders = [
9830    OpBuilder<(ins "Value":$indices, "Value":$depth, "Value":$on_value,
9831      "Value":$off_value, "IntegerAttr":$axis)>
9832  ];
9833
9834  let verifier = [{
9835    return Verify(*this);
9836  }];
9837}
9838
9839def TF_OneShotIteratorOp : TF_Op<"OneShotIterator", []> {
9840  let summary = [{
9841Makes a "one-shot" iterator that can be iterated only once.
9842  }];
9843
9844  let description = [{
9845A one-shot iterator bundles the logic for defining the dataset and
9846the state of the iterator in a single op, which allows simple input
9847pipelines to be defined without an additional initialization
9848("MakeIterator") step.
9849
9850One-shot iterators have the following limitations:
9851
9852* They do not support parameterization: all logic for creating the underlying
9853  dataset must be bundled in the `dataset_factory` function.
9854* They are not resettable. Once a one-shot iterator reaches the end of its
9855  underlying dataset, subsequent "IteratorGetNext" operations on that
9856  iterator will always produce an `OutOfRange` error.
9857
9858For greater flexibility, use "Iterator" and "MakeIterator" to define
9859an iterator using an arbitrary subgraph, which may capture tensors
9860(including fed values) as parameters, and which may be reset multiple
9861times by rerunning "MakeIterator".
9862  }];
9863
9864  let arguments = (ins
9865    SymbolRefAttr:$dataset_factory,
9866    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
9867    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
9868    DefaultValuedAttr<StrAttr, "">:$container,
9869    DefaultValuedAttr<StrAttr, "">:$shared_name
9870  );
9871
9872  let results = (outs
9873    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to an "IteratorGetNext"
9874op.}], [TF_DatasetIteratorAlloc]>:$handle
9875  );
9876}
9877
9878def TF_OnesLikeOp : TF_Op<"OnesLike", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
9879  let summary = "Returns a tensor of ones with the same shape and type as x.";
9880
9881  let arguments = (ins
9882    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{a tensor of type T.}]>:$x
9883  );
9884
9885  let results = (outs
9886    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{a tensor of the same shape and type as x but filled with ones.}]>:$y
9887  );
9888
9889  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9890}
9891
9892def TF_OptimizeDatasetV2Op : TF_Op<"OptimizeDatasetV2", [NoSideEffect]> {
9893  let summary = [{
9894Creates a dataset by applying related optimizations to `input_dataset`.
9895  }];
9896
9897  let description = [{
9898Creates a dataset by applying related optimizations to `input_dataset`.
9899  }];
9900
9901  let arguments = (ins
9902    Arg<TF_VariantTensor, [{A variant tensor representing the input dataset.}]>:$input_dataset,
9903    Arg<TF_StrTensor, [{A `tf.string` vector `tf.Tensor` identifying user enabled optimizations.}]>:$optimizations_enabled,
9904    Arg<TF_StrTensor, [{A `tf.string` vector `tf.Tensor` identifying user disabled optimizations.}]>:$optimizations_disabled,
    Arg<TF_StrTensor, [{A `tf.string` vector `tf.Tensor` identifying optimizations that are enabled by default.}]>:$optimizations_default,
9906
9907    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
9908    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
9909    DefaultValuedAttr<StrArrayAttr, "{}">:$optimization_configs
9910  );
9911
9912  let results = (outs
9913    TF_VariantTensor:$handle
9914  );
9915}
9916
9917def TF_OptionalFromValueOp : TF_Op<"OptionalFromValue", [NoSideEffect]> {
9918  let summary = "Constructs an Optional variant from a tuple of tensors.";
9919
9920  let arguments = (ins
9921    Variadic<TF_Tensor>:$components
9922  );
9923
9924  let results = (outs
9925    TF_VariantTensor:$optional
9926  );
9927
9928  TF_DerivedOperandTypeListAttr Toutput_types = TF_DerivedOperandTypeListAttr<0>;
9929}
9930
9931def TF_OptionalGetValueOp : TF_Op<"OptionalGetValue", [NoSideEffect]> {
9932  let summary = [{
9933Returns the value stored in an Optional variant or raises an error if none exists.
9934  }];
9935
9936  let arguments = (ins
9937    TF_VariantTensor:$optional
9938  );
9939
9940  let results = (outs
9941    Variadic<TF_Tensor>:$components
9942  );
9943
9944  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
9945  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
9946}
9947
9948def TF_OptionalHasValueOp : TF_Op<"OptionalHasValue", [NoSideEffect]> {
9949  let summary = [{
9950Returns true if and only if the given Optional variant has a value.
9951  }];
9952
9953  let arguments = (ins
9954    TF_VariantTensor:$optional
9955  );
9956
9957  let results = (outs
9958    TF_BoolTensor:$has_value
9959  );
9960}
9961
9962def TF_OptionalNoneOp : TF_Op<"OptionalNone", [NoSideEffect]> {
9963  let summary = "Creates an Optional variant with no value.";
9964
9965  let arguments = (ins);
9966
9967  let results = (outs
9968    TF_VariantTensor:$optional
9969  );
9970}
9971
9972def TF_OutfeedEnqueueTupleOp : TF_Op<"OutfeedEnqueueTuple", []> {
9973  let summary = "Enqueue multiple Tensor values on the computation outfeed.";
9974
9975  let arguments = (ins
9976    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be inserted into the outfeed queue as an
9977XLA tuple.}]>:$inputs
9978  );
9979
9980  let results = (outs);
9981
9982  TF_DerivedOperandTypeListAttr dtypes = TF_DerivedOperandTypeListAttr<0>;
9983}
9984
9985def TF_PackOp : TF_Op<"Pack", [NoSideEffect]> {
9986  let summary = [{
9987Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
9988  }];
9989
9990  let description = [{
9991Packs the `N` tensors in `values` into a tensor with rank one higher than each
9992tensor in `values`, by packing them along the `axis` dimension.
9993Given a list of tensors of shape `(A, B, C)`;
9994
9995if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
9996if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
9997Etc.
9998
9999For example:
10000
10001```
10002# 'x' is [1, 4]
10003# 'y' is [2, 5]
10004# 'z' is [3, 6]
10005pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
10006pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
10007```
10008
10009This is the opposite of `unpack`.
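
The public Python wrapper is `tf.stack`; a minimal sketch reproducing the
example above:

```python
import tensorflow as tf

x, y, z = tf.constant([1, 4]), tf.constant([2, 5]), tf.constant([3, 6])
tf.stack([x, y, z])          # ==> [[1, 4], [2, 5], [3, 6]]
tf.stack([x, y, z], axis=1)  # ==> [[1, 2, 3], [4, 5, 6]]
```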
10010  }];
10011
10012  let arguments = (ins
10013    Arg<Variadic<TF_Tensor>, [{Must be of same shape and type.}]>:$values,
10014
10015    DefaultValuedAttr<I64Attr, "0">:$axis
10016  );
10017
10018  let results = (outs
10019    Res<TF_Tensor, [{The packed tensor.}]>:$output
10020  );
10021
10022  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
10023  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10024
10025  let verifier = [{
10026    return Verify(*this);
10027  }];
10028
10029  let hasCanonicalizer = 1;
10030
10031  let hasFolder = 1;
10032}
10033
10034def TF_PadOp : TF_Op<"Pad", [NoSideEffect, TF_FoldOperandsTransposeInterface, TF_OperandHasRank<1, 2>]> {
10035  let summary = "Pads a tensor with zeros.";
10036
10037  let description = [{
This operation pads `input` with zeros according to the `paddings` you
specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is the
10040rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
10041how many zeros to add before the contents of `input` in that dimension, and
10042`paddings[D, 1]` indicates how many zeros to add after the contents of `input`
10043in that dimension.
10044
10045The padded size of each dimension D of the output is:
10046
10047`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
10048
10049For example:
10050
10051```
10052# 't' is [[1, 1], [2, 2]]
10053# 'paddings' is [[1, 1], [2, 2]]
10054# rank of 't' is 2
10055pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
10056                      [0, 0, 1, 1, 0, 0]
10057                      [0, 0, 2, 2, 0, 0]
10058                      [0, 0, 0, 0, 0, 0]]
10059```
10060  }];
10061
10062  let arguments = (ins
10063    TF_Tensor:$input,
10064    TF_I32OrI64Tensor:$paddings
10065  );
10066
10067  let results = (outs
10068    TF_Tensor:$output
10069  );
10070
10071  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10072  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
10073
10074  let extraClassDeclaration = [{
10075    // TF_FoldOperandsTransposeInterface:
10076    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
10077    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
10078    LogicalResult FoldOperandsPermutation(ArrayRef<int64_t> permutation);
10079  }];
10080}
10081
10082def TF_PadV2Op : TF_Op<"PadV2", [NoSideEffect, TF_OperandHasRank<1, 2>]> {
10083  let summary = "Pads a tensor.";
10084
10085  let description = [{
10086This operation pads `input` according to the `paddings` and `constant_values`
10087you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
10088the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
10089how many padding values to add before the contents of `input` in that dimension,
10090and `paddings[D, 1]` indicates how many padding values to add after the contents
10091of `input` in that dimension. `constant_values` is a scalar tensor of the same
10092type as `input` that indicates the value to use for padding `input`.
10093
10094The padded size of each dimension D of the output is:
10095
10096`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
10097
10098For example:
10099
10100```
10101# 't' is [[1, 1], [2, 2]]
10102# 'paddings' is [[1, 1], [2, 2]]
10103# 'constant_values' is 0
10104# rank of 't' is 2
10105pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
10106                      [0, 0, 1, 1, 0, 0]
10107                      [0, 0, 2, 2, 0, 0]
10108                      [0, 0, 0, 0, 0, 0]]
10109```
10110  }];
10111
10112  let arguments = (ins
10113    TF_Tensor:$input,
10114    TF_I32OrI64Tensor:$paddings,
10115    TF_Tensor:$constant_values
10116  );
10117
10118  let results = (outs
10119    TF_Tensor:$output
10120  );
10121
10122  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10123  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
10124}
10125
10126def TF_ParallelDynamicStitchOp : TF_Op<"ParallelDynamicStitch", [NoSideEffect, SameVariadicOperandSize]> {
10127  let summary = [{
10128Interleave the values from the `data` tensors into a single tensor.
10129  }];
10130
10131  let description = [{
10132Builds a merged tensor such that
10133
10134```python
10135    merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
10136```
10137
10138For example, if each `indices[m]` is scalar or vector, we have
10139
10140```python
10141    # Scalar indices:
10142    merged[indices[m], ...] = data[m][...]
10143
10144    # Vector indices:
10145    merged[indices[m][i], ...] = data[m][i, ...]
10146```
10147
10148Each `data[i].shape` must start with the corresponding `indices[i].shape`,
10149and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
10150must have `data[i].shape = indices[i].shape + constant`.  In terms of this
10151`constant`, the output shape is
10152
    merged.shape = [max(indices) + 1] + constant
10154
Values may be merged in parallel, so if an index appears in both `indices[m][i]`
and `indices[n][j]`, the result may be invalid. This differs from the normal
DynamicStitch operator, which defines the behavior in that case.
10158
10159For example:
10160
10161```python
10162    indices[0] = 6
10163    indices[1] = [4, 1]
10164    indices[2] = [[5, 2], [0, 3]]
10165    data[0] = [61, 62]
10166    data[1] = [[41, 42], [11, 12]]
10167    data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
10168    merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
10169              [51, 52], [61, 62]]
10170```
10171
This method can be used to merge partitions created by `dynamic_partition`,
as illustrated in the following example:
10174
10175```python
10176    # Apply function (increments x_i) on elements for which a certain condition
10177    # apply (x_i != -1 in this example).
10178    x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
10179    condition_mask=tf.not_equal(x,tf.constant(-1.))
10180    partitioned_data = tf.dynamic_partition(
10181        x, tf.cast(condition_mask, tf.int32) , 2)
10182    partitioned_data[1] = partitioned_data[1] + 1.0
10183    condition_indices = tf.dynamic_partition(
10184        tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2)
10185    x = tf.dynamic_stitch(condition_indices, partitioned_data)
10186    # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
10187    # unchanged.
10188```
10189
10190<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
10191<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
10192</div>
10193  }];
10194
10195  let arguments = (ins
10196    Variadic<TF_Int32Tensor>:$indices,
10197    Variadic<TF_Tensor>:$data
10198  );
10199
10200  let results = (outs
10201    TF_Tensor:$merged
10202  );
10203
10204  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
10205  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
10206}
10207
10208def TF_ParallelMapDatasetOp : TF_Op<"ParallelMapDataset", [NoSideEffect]> {
10209  let summary = [{
10210Creates a dataset that applies `f` to the outputs of `input_dataset`.
10211  }];
10212
10213  let description = [{
10214Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
10215to `num_parallel_calls` copies of `f` in parallel.
10216  }];
10217
10218  let arguments = (ins
10219    TF_VariantTensor:$input_dataset,
10220    Variadic<TF_Tensor>:$other_arguments,
10221    Arg<TF_Int32Tensor, [{The number of concurrent invocations of `f` that process
10222elements from `input_dataset` in parallel.}]>:$num_parallel_calls,
10223
10224    SymbolRefAttr:$f,
10225    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
10226    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
10227    DefaultValuedAttr<BoolAttr, "true">:$use_inter_op_parallelism,
10228    DefaultValuedAttr<BoolAttr, "false">:$sloppy,
10229    DefaultValuedAttr<BoolAttr, "false">:$preserve_cardinality
10230  );
10231
10232  let results = (outs
10233    TF_VariantTensor:$handle
10234  );
10235
10236  TF_DerivedOperandTypeListAttr Targuments = TF_DerivedOperandTypeListAttr<1>;
10237}
10238
10239def TF_ParameterizedTruncatedNormalOp : TF_Op<"ParameterizedTruncatedNormal", [TF_CannotDuplicate]> {
10240  let summary = [{
Outputs random values from a normal distribution.
10242  }];
10243
10244  let description = [{
The parameters may each be a scalar, which applies to the entire output, or a
vector of length shape[0], which stores the parameters for each batch.
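
As an illustration, a sketch via `tf.raw_ops.ParameterizedTruncatedNormal`,
assuming the raw-op keyword names mirror the arguments listed below:

```python
import tensorflow as tf

# Two batches of 5 samples; per-batch mean/stddev, truncated to [-1, 1].
samples = tf.raw_ops.ParameterizedTruncatedNormal(
    shape=tf.constant([2, 5]),
    means=tf.constant([0.0, 0.5]),
    stdevs=tf.constant([1.0, 0.2]),
    minvals=tf.constant([-1.0, -1.0]),
    maxvals=tf.constant([1.0, 1.0]))
```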
10247  }];
10248
10249  let arguments = (ins
10250    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor. Batches are indexed by the 0th dimension.}]>:$shape,
10251    Arg<TF_FloatTensor, [{The mean parameter of each batch.}]>:$means,
10252    Arg<TF_FloatTensor, [{The standard deviation parameter of each batch. Must be greater than 0.}]>:$stdevs,
10253    Arg<TF_FloatTensor, [{The minimum cutoff. May be -infinity.}]>:$minvals,
10254    Arg<TF_FloatTensor, [{The maximum cutoff. May be +infinity, and must be more than the minval
10255for each batch.}]>:$maxvals,
10256
10257    DefaultValuedAttr<I64Attr, "0">:$seed,
10258    DefaultValuedAttr<I64Attr, "0">:$seed2
10259  );
10260
10261  let results = (outs
10262    Res<TF_FloatTensor, [{A matrix of shape num_batches x samples_per_batch, filled with random
10263truncated normal values using the parameters for each row.}]>:$output
10264  );
10265
10266  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10267  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
10268}
10269
10270def TF_PartitionedCallOp : TF_Op<"PartitionedCall", [CallOpInterface, NoSideEffect]> {
10271  let summary = [{
Returns `f(inputs)`, where `f`'s body is placed and partitioned.
10273  }];
10274
10275  let description = [{
10276Asynchronously executes a function, potentially across multiple devices but
10277within a single process. The kernel places and partitions a given function's
10278underlying graph, and executes each of the partitioned subgraphs as a function.
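
In TF2, invoking a `tf.function`-traced function is typically lowered to this
op (or its stateful variant); a minimal sketch:

```python
import tensorflow as tf

@tf.function
def f(x):
  return x * 2.0

# The call below emits a (Stateful)PartitionedCall of the traced graph.
f(tf.constant(3.0))  # ==> 6.0
```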
10279  }];
10280
10281  let arguments = (ins
10282    Arg<Variadic<TF_Tensor>, [{A list of input tensors.}]>:$args,
10283
10284    SymbolRefAttr:$f,
10285    DefaultValuedAttr<StrAttr, "">:$config,
10286    DefaultValuedAttr<StrAttr, "">:$config_proto,
10287    DefaultValuedAttr<StrAttr, "">:$executor_type
10288  );
10289
10290  let results = (outs
10291    Res<Variadic<TF_Tensor>, [{A list of return values.}]>:$output
10292  );
10293
10294  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
10295  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
10296
10297  let extraClassDeclaration = [{
10298    // Gets the argument operands to the called function.
10299    operand_range getArgOperands() { return args(); }
10300
10301    // Returns the callee of this operation.
10302    CallInterfaceCallable getCallableForCallee() { return fAttr(); }
10303
    // Resolves and returns the function called by this operation.
10305    FuncOp func() {
10306      return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, f());
10307    }
10308  }];
10309
10310  let verifier = [{ return VerifyPartitionedCall(*this); }];
10311}
10312
10313def TF_PolygammaOp : TF_Op<"Polygamma", [NoSideEffect, ResultsBroadcastableShape]>,
10314                     WithBroadcastableBinOpBuilder {
10315  let summary = [{
10316Compute the polygamma function \\(\psi^{(n)}(x)\\).
10317  }];
10318
10319  let description = [{
10320The polygamma function is defined as:
10321
10322
10323\\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
10324
10325where \\(\psi(x)\\) is the digamma function.
The polygamma function is defined only for non-negative integer orders \\(a\\).
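
For example, order `a = 1` gives the trigamma function; a minimal sketch using
the Python wrapper `tf.math.polygamma`:

```python
import tensorflow as tf

tf.math.polygamma(tf.constant(1.0), tf.constant(1.0))  # ==> pi**2 / 6 ~= 1.6449341
```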
10327  }];
10328
10329  let arguments = (ins
10330    TF_F32OrF64Tensor:$a,
10331    TF_F32OrF64Tensor:$x
10332  );
10333
10334  let results = (outs
10335    TF_F32OrF64Tensor:$z
10336  );
10337
10338  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10339}
10340
10341def TF_PopulationCountOp : TF_Op<"PopulationCount", [NoSideEffect, SameOperandsAndResultShape]> {
10342  let summary = [{
10343Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
10344  }];
10345
10346  let description = [{
10347For each entry in `x`, calculates the number of `1` (on) bits in the binary
10348representation of that entry.
10349
10350**NOTE**: It is more efficient to first `tf.bitcast` your tensors into
10351`int32` or `int64` and perform the bitcount on the result, than to feed in
103528- or 16-bit inputs and then aggregate the resulting counts.
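
For example, a sketch via `tf.raw_ops.PopulationCount`:

```python
import tensorflow as tf

x = tf.constant([0, 1, 5, 255], dtype=tf.int32)
tf.raw_ops.PopulationCount(x=x)  # ==> [0, 1, 2, 8] as uint8
```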
10353  }];
10354
10355  let arguments = (ins
10356    TF_IntTensor:$x
10357  );
10358
10359  let results = (outs
10360    TF_Uint8Tensor:$y
10361  );
10362
10363  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10364}
10365
10366def TF_PowOp : TF_Op<"Pow", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
10367               WithBroadcastableBinOpBuilder {
10368  let summary = "Computes the power of one value to another.";
10369
10370  let description = [{
10371Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
10372corresponding elements in `x` and `y`. For example:
10373
10374```
# tensor 'x' is [[2, 2], [3, 3]]
10376# tensor 'y' is [[8, 16], [2, 3]]
10377tf.pow(x, y) ==> [[256, 65536], [9, 27]]
10378```
10379  }];
10380
10381  let arguments = (ins
10382    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x,
10383    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
10384  );
10385
10386  let results = (outs
10387    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$z
10388  );
10389
10390  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10391
10392  let hasFolder = 1;
10393}
10394
10395def TF_PrefetchDatasetOp : TF_Op<"PrefetchDataset", [NoSideEffect]> {
10396  let summary = [{
10397Creates a dataset that asynchronously prefetches elements from `input_dataset`.
10398  }];
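
  let description = [{
This op backs the `tf.data.Dataset.prefetch` transformation. A minimal usage
sketch of the public API (the `buffer_size` below maps to this op's
`buffer_size` operand):

```python
import tensorflow as tf

# Overlap producer and consumer by buffering up to 2 elements ahead.
ds = tf.data.Dataset.range(10).map(lambda x: x * 2).prefetch(buffer_size=2)
print(list(ds.as_numpy_iterator()))  # [0, 2, 4, ..., 18]
```
  }];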
10399
10400  let arguments = (ins
10401    TF_VariantTensor:$input_dataset,
10402    Arg<TF_Int64Tensor, [{The maximum number of elements to buffer in an iterator over
10403this dataset.}]>:$buffer_size,
10404
10405    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
10406    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
10407    DefaultValuedAttr<I64Attr, "0">:$slack_period,
10408    DefaultValuedAttr<BoolAttr, "true">:$legacy_autotune,
10409    DefaultValuedAttr<I64Attr, "0">:$buffer_size_min
10410  );
10411
10412  let results = (outs
10413    TF_VariantTensor:$handle
10414  );
10415}
10416
10417def TF_PreventGradientOp : TF_Op<"PreventGradient", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
10418  let summary = [{
10419An identity op that triggers an error if a gradient is requested.
10420  }];
10421
10422  let description = [{
10423When executed in a graph, this op outputs its input tensor as-is.
10424
10425When building ops to compute gradients, the TensorFlow gradient system
will return an error when trying to look up the gradient of this op,
because no gradient may ever be registered for this function.  This
10428op exists to prevent subtle bugs from silently returning unimplemented
10429gradients in some corner cases.
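
A hedged sketch (using the `tf.raw_ops.PreventGradient` endpoint): the forward
pass is an identity, while differentiating through it raises an error carrying
`message`.

```python
import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.raw_ops.PreventGradient(input=x, message="no gradient here")
print(y.numpy())  # 3.0 -- the forward pass passes the input through unchanged.
# tape.gradient(y, x) would raise an error that includes "no gradient here".
```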
10430  }];
10431
10432  let arguments = (ins
10433    Arg<TF_Tensor, [{any tensor.}]>:$input,
10434
10435    DefaultValuedAttr<StrAttr, "">:$message
10436  );
10437
10438  let results = (outs
10439    Res<TF_Tensor, [{the same input tensor.}]>:$output
10440  );
10441
10442  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10443}
10444
10445def TF_PrintOp : TF_Op<"Print", []> {
10446  let summary = "Prints a list of tensors.";
10447
10448  let description = [{
10449Passes `input` through to `output` and prints `data` when evaluating.
10450  }];
10451
10452  let arguments = (ins
10453    Arg<TF_Tensor, [{The tensor passed to `output`}]>:$input,
10454    Arg<Variadic<TF_Tensor>, [{A list of tensors to print out when op is evaluated.}]>:$data,
10455
10456    DefaultValuedAttr<StrAttr, "">:$message,
10457    DefaultValuedAttr<I64Attr, "-1">:$first_n,
10458    DefaultValuedAttr<I64Attr, "3">:$summarize
10459  );
10460
10461  let results = (outs
10462    Res<TF_Tensor, [{The unmodified `input` tensor}]>:$output
10463  );
10464
10465  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10466  TF_DerivedOperandTypeListAttr U = TF_DerivedOperandTypeListAttr<1>;
10467}
10468
10469def TF_PrintV2Op : TF_Op<"PrintV2", []> {
10470  let summary = "Prints a string scalar.";
10471
10472  let description = [{
10473Prints a string scalar to the desired output_stream.
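
For example, via `tf.print`, which formats its inputs into a string scalar and
emits this op:

```python
import sys
import tensorflow as tf

tf.print("hello", 42, output_stream=sys.stderr)  # prints: hello 42
```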
10474  }];
10475
10476  let arguments = (ins
10477    Arg<TF_StrTensor, [{The string scalar to print.}]>:$input,
10478
10479    DefaultValuedAttr<StrAttr, "stderr">:$output_stream,
10480    DefaultValuedAttr<StrAttr, "\n">:$end
10481  );
10482
10483  let results = (outs);
10484}
10485
10486def TF_ProdOp : TF_Op<"Prod", [NoSideEffect]> {
10487  let summary = [{
10488Computes the product of elements across dimensions of a tensor.
10489  }];
10490
10491  let description = [{
10492Reduces `input` along the dimensions given in `axis`. Unless
10493`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
10494`axis`. If `keep_dims` is true, the reduced dimensions are
10495retained with length 1.
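
For example (via the public `tf.reduce_prod` wrapper, which lowers to this op):

```python
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
print(tf.reduce_prod(x, axis=0).numpy())                 # [3., 8.]
print(tf.reduce_prod(x, axis=1, keepdims=True).numpy())  # [[2.], [12.]]
```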
10496  }];
10497
10498  let arguments = (ins
10499    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
10500    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
10501`[-rank(input), rank(input))`.}]>:$reduction_indices,
10502
10503    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
10504  );
10505
10506  let results = (outs
10507    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
10508  );
10509
10510  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10511  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
10512}
10513
10514def TF_QrOp : TF_Op<"Qr", [NoSideEffect]> {
10515  let summary = "Computes the QR decompositions of one or more matrices.";
10516
10517  let description = [{
10518Computes the QR decomposition of each inner matrix in `tensor` such that
`tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
10520
10521Currently, the gradient for the QR decomposition is well-defined only when
10522the first `P` columns of the inner matrix are linearly independent, where
`P` is the minimum of `M` and `N`, the 2 inner-most dimensions of `tensor`.
10524
10525```python
10526# a is a tensor.
10527# q is a tensor of orthonormal matrices.
10528# r is a tensor of upper triangular matrices.
10529q, r = qr(a)
10530q_full, r_full = qr(a, full_matrices=True)
10531```
10532  }];
10533
10534  let arguments = (ins
10535    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
10536form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.}]>:$input,
10537
10538    DefaultValuedAttr<BoolAttr, "false">:$full_matrices
10539  );
10540
10541  let results = (outs
10542    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Orthonormal basis for range of `a`. If `full_matrices` is `False` then
10543shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
10544`[..., M, M]`.}]>:$q,
10545    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Triangular factor. If `full_matrices` is `False` then shape is
10546`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.}]>:$r
10547  );
10548
10549  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10550
10551  let verifier = [{
10552    return Verify(*this);
10553  }];
10554}
10555
10556def TF_QuantizeAndDequantizeOp : TF_Op<"QuantizeAndDequantize", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
10557  let summary = "Use QuantizeAndDequantizeV2 instead.";
10558
10559  let arguments = (ins
10560    TF_FloatTensor:$input,
10561
10562    DefaultValuedAttr<BoolAttr, "true">:$signed_input,
10563    DefaultValuedAttr<I64Attr, "8">:$num_bits,
10564    DefaultValuedAttr<BoolAttr, "false">:$range_given,
10565    DefaultValuedAttr<F32Attr, "0.0f">:$input_min,
10566    DefaultValuedAttr<F32Attr, "0.0f">:$input_max
10567  );
10568
10569  let results = (outs
10570    TF_FloatTensor:$output
10571  );
10572
10573  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10574}
10575
10576def TF_QuantizeAndDequantizeV2Op : TF_Op<"QuantizeAndDequantizeV2", [NoSideEffect]> {
10577  let summary = "Quantizes then dequantizes a tensor.";
10578
10579  let description = [{
10580This op simulates the precision loss from the quantized forward pass by:
10581
105821. Quantizing the tensor to fixed point numbers, which should match the target
10583   quantization method when it is used in inference.
105842. Dequantizing it back to floating point numbers for the following ops, most
10585   likely matmul.
10586
10587There are different ways to quantize. This version uses only scaling, so 0.0
10588maps to 0.
10589
From the specified 'num_bits' of the quantized output type, the op determines
the minimum and maximum representable quantized values.
10592
10593e.g.
10594
10595*   [-128, 127] for signed, num_bits = 8, or
10596*   [0, 255] for unsigned, num_bits = 8.
10597
10598If range_given == False, the initial input_min, input_max will be determined
10599automatically as the minimum and maximum values in the input tensor, otherwise
10600the specified values of input_min, input_max are used.
10601
10602Note: If the input_min, input_max are specified, they do not need to equal the
10603actual minimum and maximum values in the tensor. e.g. in some cases it may be
10604beneficial to specify these values such that the low probability extremes of the
10605input distribution are clipped.
10606
10607This op determines the maximum scale_factor that would map the initial
10608[input_min, input_max] range to a range that lies within the representable
10609quantized range.
10610
10611It determines the scale from one of input_min and input_max, then updates the
10612other one to maximize the representable range.
10613
10614e.g.
10615
10616*   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
    5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
    would update input_max to be 127 / 12.8 = 9.921875
10619*   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
    10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
    would update input_min to be -128.0 / 12.7 = -10.07874
10622*   if the output is unsigned, input_min is forced to be 0, and only the
10623    specified input_max is used.
10624
10625After determining the scale_factor and updating the input range, it applies the
10626following to each value in the 'input' tensor.
10627
10628output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
10629
10630The above round function rounds the value based on the given round_mode.
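
A NumPy sketch of this scaling math (illustrative only; assumes signed output,
`num_bits = 8`, a given range, and `round_mode = "HALF_TO_EVEN"`):

```python
import numpy as np

def quantize_dequantize(x, input_min=-10.0, input_max=10.0, num_bits=8):
  qmin = -2.0 ** (num_bits - 1)      # -128
  qmax = 2.0 ** (num_bits - 1) - 1   # 127
  scale = min(qmin / input_min, qmax / input_max)    # 12.7 here
  # The non-binding bound widens to the full representable range.
  input_min, input_max = qmin / scale, qmax / scale  # -10.07874, 10.0
  # np.rint rounds half to even, matching HALF_TO_EVEN.
  return np.rint(np.clip(x, input_min, input_max) * scale) / scale

print(quantize_dequantize(np.array([1.0, 9.8, -100.0])))
# ~[  1.02362205   9.76377953 -10.07874016]
```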
10631  }];
10632
10633  let arguments = (ins
10634    Arg<TF_FloatTensor, [{Tensor to quantize and then dequantize.}]>:$input,
10635    Arg<TF_FloatTensor, [{If `range_given == True`, this specifies the minimum input value that needs to
10636be represented, otherwise it is determined from the min value of the `input`
10637tensor.}]>:$input_min,
10638    Arg<TF_FloatTensor, [{If `range_given == True`, this specifies the maximum input value that needs to
10639be represented, otherwise it is determined from the max value of the `input`
10640tensor.}]>:$input_max,
10641
10642    DefaultValuedAttr<BoolAttr, "true">:$signed_input,
10643    DefaultValuedAttr<I64Attr, "8">:$num_bits,
10644    DefaultValuedAttr<BoolAttr, "false">:$range_given,
10645    DefaultValuedAttr<TF_AnyStrAttrOf<["HALF_TO_EVEN", "HALF_UP"]>, "HALF_TO_EVEN">:$round_mode,
10646    DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
10647    DefaultValuedAttr<I64Attr, "-1">:$axis
10648  );
10649
10650  let results = (outs
10651    TF_FloatTensor:$output
10652  );
10653
10654  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10655
10656  let hasCanonicalizer = 1;
10657}
10658
10659def TF_QuantizeAndDequantizeV3Op : TF_Op<"QuantizeAndDequantizeV3", [NoSideEffect]> {
10660  let summary = "Quantizes then dequantizes a tensor.";
10661
10662  let description = [{
10663This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
10664tensor, so its value can change during training.
10665  }];
10666
10667  let arguments = (ins
10668    TF_FloatTensor:$input,
10669    TF_FloatTensor:$input_min,
10670    TF_FloatTensor:$input_max,
10671    TF_Int32Tensor:$num_bits,
10672
10673    DefaultValuedAttr<BoolAttr, "true">:$signed_input,
10674    DefaultValuedAttr<BoolAttr, "true">:$range_given,
10675    DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
10676    DefaultValuedAttr<I64Attr, "-1">:$axis
10677  );
10678
10679  let results = (outs
10680    TF_FloatTensor:$output
10681  );
10682
10683  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10684}
10685
10686def TF_QuantizeAndDequantizeV4Op : TF_Op<"QuantizeAndDequantizeV4", [NoSideEffect]> {
10687  let summary = "Quantizes then dequantizes a tensor.";
10688
10689  let description = [{
10690This is almost identical to QuantizeAndDequantizeV2, except that it returns a
10691gradient of 1 for inputs that are within the quantization range, or 0 otherwise.
10692  }];
10693
10694  let arguments = (ins
10695    Arg<TF_FloatTensor, [{Tensor to quantize and then dequantize.}]>:$input,
10696    Arg<TF_FloatTensor, [{If `range_given == True`, this specifies the minimum input value that needs to
10697be represented, otherwise it is determined from the min value of the `input`
10698tensor.}]>:$input_min,
10699    Arg<TF_FloatTensor, [{If `range_given == True`, this specifies the maximum input value that needs to
10700be represented, otherwise it is determined from the max value of the `input`
10701tensor.}]>:$input_max,
10702
10703    DefaultValuedAttr<BoolAttr, "true">:$signed_input,
10704    DefaultValuedAttr<I64Attr, "8">:$num_bits,
10705    DefaultValuedAttr<BoolAttr, "false">:$range_given,
10706    DefaultValuedAttr<TF_AnyStrAttrOf<["HALF_TO_EVEN", "HALF_UP"]>, "HALF_TO_EVEN">:$round_mode,
10707    DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
10708    DefaultValuedAttr<I64Attr, "-1">:$axis
10709  );
10710
10711  let results = (outs
10712    TF_FloatTensor:$output
10713  );
10714
10715  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10716}
10717
10718def TF_QueueDequeueV2Op : TF_Op<"QueueDequeueV2", []> {
10719  let summary = "Dequeues a tuple of one or more tensors from the given queue.";
10720
10721  let description = [{
10722This operation has k outputs, where k is the number of components
10723in the tuples stored in the given queue, and output i is the ith
10724component of the dequeued tuple.
10725
10726N.B. If the queue is empty, this operation will block until an element
10727has been dequeued (or 'timeout_ms' elapses, if specified).
10728  }];
10729
10730  let arguments = (ins
10731    Arg<TF_ResourceTensor, [{The handle to a queue.}]>:$handle,
10732
10733    DefaultValuedAttr<I64Attr, "-1">:$timeout_ms
10734  );
10735
10736  let results = (outs
10737    Res<Variadic<TF_Tensor>, [{One or more tensors that were dequeued as a tuple.}]>:$components
10738  );
10739
10740  TF_DerivedResultTypeListAttr component_types = TF_DerivedResultTypeListAttr<0>;
10741}
10742
10743def TF_RFFTOp : TF_Op<"RFFT", [NoSideEffect]> {
10744  let summary = "Real-valued fast Fourier transform.";
10745
10746  let description = [{
10747Computes the 1-dimensional discrete Fourier transform of a real-valued signal
10748over the inner-most dimension of `input`.
10749
10750Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
10751`fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
10752followed by the `fft_length / 2` positive-frequency terms.
10753
10754Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
10755corresponding dimension of `input`, the dimension is cropped. If it is larger,
10756the dimension is padded with zeros.
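
For example (via `tf.signal.rfft`, which maps to this op):

```python
import tensorflow as tf

x = tf.constant([0., 1., 0., 0.])
# fft_length = 4, so 4 / 2 + 1 = 3 unique components are returned.
print(tf.signal.rfft(x).numpy())  # ~[ 1.+0.j  0.-1.j -1.+0.j]
```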
10757  }];
10758
10759  let arguments = (ins
10760    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
10761    Arg<TF_Int32Tensor, [{An int32 tensor of shape [1]. The FFT length.}]>:$fft_length
10762  );
10763
10764  let results = (outs
10765    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most
10766  dimension of `input` is replaced with the `fft_length / 2 + 1` unique
10767  frequency components of its 1D Fourier transform.
10768
10769@compatibility(numpy)
10770Equivalent to np.fft.rfft
10771@end_compatibility}]>:$output
10772  );
10773
10774  TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
10775  TF_DerivedResultTypeAttr Tcomplex = TF_DerivedResultTypeAttr<0>;
10776}
10777
10778def TF_RFFT2DOp : TF_Op<"RFFT2D", [NoSideEffect]> {
10779  let summary = "2D real-valued fast Fourier transform.";
10780
10781  let description = [{
10782Computes the 2-dimensional discrete Fourier transform of a real-valued signal
10783over the inner-most 2 dimensions of `input`.
10784
10785Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
10786`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
10787of `output`: the zero-frequency term, followed by the `fft_length / 2`
10788positive-frequency terms.
10789
10790Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
10791corresponding dimension of `input`, the dimension is cropped. If it is larger,
10792the dimension is padded with zeros.
10793  }];
10794
10795  let arguments = (ins
10796    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
10797    Arg<TF_Int32Tensor, [{An int32 tensor of shape [2]. The FFT length for each dimension.}]>:$fft_length
10798  );
10799
10800  let results = (outs
10801    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most 2
10802  dimensions of `input` are replaced with their 2D Fourier transform. The
10803  inner-most dimension contains `fft_length / 2 + 1` unique frequency
10804  components.
10805
10806@compatibility(numpy)
10807Equivalent to np.fft.rfft2
10808@end_compatibility}]>:$output
10809  );
10810
10811  TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
10812  TF_DerivedResultTypeAttr Tcomplex = TF_DerivedResultTypeAttr<0>;
10813}
10814
10815def TF_RFFT3DOp : TF_Op<"RFFT3D", [NoSideEffect]> {
10816  let summary = "3D real-valued fast Fourier transform.";
10817
10818  let description = [{
10819Computes the 3-dimensional discrete Fourier transform of a real-valued signal
10820over the inner-most 3 dimensions of `input`.
10821
10822Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
10823`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
10824of `output`: the zero-frequency term, followed by the `fft_length / 2`
10825positive-frequency terms.
10826
10827Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
10828corresponding dimension of `input`, the dimension is cropped. If it is larger,
10829the dimension is padded with zeros.
10830  }];
10831
10832  let arguments = (ins
10833    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
10834    Arg<TF_Int32Tensor, [{An int32 tensor of shape [3]. The FFT length for each dimension.}]>:$fft_length
10835  );
10836
10837  let results = (outs
10838    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most 3
  dimensions of `input` are replaced with their 3D Fourier transform. The
10840  inner-most dimension contains `fft_length / 2 + 1` unique frequency
10841  components.
10842
10843@compatibility(numpy)
10844Equivalent to np.fft.rfftn with 3 dimensions.
10845@end_compatibility}]>:$output
10846  );
10847
10848  TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
10849  TF_DerivedResultTypeAttr Tcomplex = TF_DerivedResultTypeAttr<0>;
10850}
10851
10852def TF_RGBToHSVOp : TF_Op<"RGBToHSV", [NoSideEffect]> {
10853  let summary = "Converts one or more images from RGB to HSV.";
10854
10855  let description = [{
10856Outputs a tensor of the same shape as the `images` tensor, containing the HSV
value of the pixels. The output is only well defined if the values in `images`
10858are in `[0,1]`.
10859
10860`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
10861`output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
10862corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
10863
10864Usage Example:
10865
10866>>> blue_image = tf.stack([
10867...    tf.zeros([5,5]),
10868...    tf.zeros([5,5]),
10869...    tf.ones([5,5])],
10870...    axis=-1)
10871>>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)
10872>>> blue_hsv_image[0,0].numpy()
10873array([0.6666667, 1. , 1. ], dtype=float32)
10874  }];
10875
10876  let arguments = (ins
10877    Arg<TF_FloatTensor, [{1-D or higher rank. RGB data to convert. Last dimension must be size 3.}]>:$images
10878  );
10879
10880  let results = (outs
10881    Res<TF_FloatTensor, [{`images` converted to HSV.}]>:$output
10882  );
10883
10884  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10885}
10886
10887def TF_RaggedGatherOp : TF_Op<"RaggedGather", [NoSideEffect]> {
10888  let summary = [{
10889Gather ragged slices from `params` axis `0` according to `indices`.
10890  }];
10891
10892  let description = [{
10893Outputs a `RaggedTensor` output composed from `output_dense_values` and
10894`output_nested_splits`, such that:
10895
10896```python
10897output.shape = indices.shape + params.shape[1:]
10898output.ragged_rank = indices.shape.ndims + params.ragged_rank
10899output[i...j, d0...dn] = params[indices[i...j], d0...dn]
10900```
10901
10902where
10903
10904* `params =
10905   ragged.from_nested_row_splits(params_dense_values, params_nested_splits)`
10906   provides the values that should be gathered.
* `indices` is a dense tensor with dtype `int32` or `int64`, indicating which
10908   values should be gathered.
10909* `output =
10910   ragged.from_nested_row_splits(output_dense_values, output_nested_splits)`
10911   is the output tensor.
10912
(Note: This C++ op is used to implement the higher-level Python
`tf.ragged.gather` op, which also supports ragged indices.)
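
A hedged usage sketch of that wrapper (`tf.gather` on a `tf.RaggedTensor`
dispatches to this op for dense indices):

```python
import tensorflow as tf

params = tf.ragged.constant([['a', 'b'], [], ['c', 'd', 'e']])
print(tf.gather(params, [2, 0, 0]))
# <tf.RaggedTensor [[b'c', b'd', b'e'], [b'a', b'b'], [b'a', b'b']]>
```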
10915  }];
10916
10917  let arguments = (ins
10918    Arg<Variadic<TF_I32OrI64Tensor>, [{The `nested_row_splits` tensors that define the row-partitioning for the
10919`params` RaggedTensor input.}]>:$params_nested_splits,
10920    Arg<TF_Tensor, [{The `flat_values` for the `params` RaggedTensor. There was a terminology change
10921at the python level from dense_values to flat_values, so dense_values is the
10922deprecated name.}]>:$params_dense_values,
10923    Arg<TF_I32OrI64Tensor, [{Indices in the outermost dimension of `params` of the values that should be
10924gathered.}]>:$indices
10925  );
10926
10927  let results = (outs
10928    Res<Variadic<TF_I32OrI64Tensor>, [{The `nested_row_splits` tensors that define the row-partitioning for the
10929returned RaggedTensor.}]>:$output_nested_splits,
10930    Res<TF_Tensor, [{The `flat_values` for the returned RaggedTensor.}]>:$output_dense_values
10931  );
10932
10933  TF_DerivedOperandSizeAttr PARAMS_RAGGED_RANK = TF_DerivedOperandSizeAttr<0>;
10934  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
10935  TF_DerivedOperandTypeAttr Tsplits = TF_DerivedOperandTypeAttr<0>;
10936  TF_DerivedOperandTypeAttr Tvalues = TF_DerivedOperandTypeAttr<1>;
10937  TF_DerivedResultSizeAttr OUTPUT_RAGGED_RANK = TF_DerivedResultSizeAttr<0>;
10938}
10939
10940def TF_RaggedRangeOp : TF_Op<"RaggedRange", [NoSideEffect]> {
10941  let summary = [{
10942Returns a `RaggedTensor` containing the specified sequences of numbers.
10943  }];
10944
10945  let description = [{
10946Returns a `RaggedTensor` `result` composed from `rt_dense_values` and
10947`rt_nested_splits`, such that
10948`result[i] = range(starts[i], limits[i], deltas[i])`.
10949
10950```python
10951(rt_nested_splits, rt_dense_values) = ragged_range(
10952      starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)
10953result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits)
10954print(result)
10955<tf.RaggedTensor [[2], [], [8, 9, 10, 11]] >
10956```
10957
10958The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
10959The vector inputs must all have the same size.  Scalar inputs are broadcast
10960to match the size of the vector inputs.
10961  }];
10962
10963  let arguments = (ins
10964    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The starts of each range.}]>:$starts,
10965    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The limits of each range.}]>:$limits,
10966    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The deltas of each range.}]>:$deltas
10967  );
10968
10969  let results = (outs
10970    Res<TF_I32OrI64Tensor, [{The `row_splits` for the returned `RaggedTensor`.}]>:$rt_nested_splits,
10971    Res<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The `flat_values` for the returned `RaggedTensor`.}]>:$rt_dense_values
10972  );
10973
10974  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10975  TF_DerivedResultTypeAttr Tsplits = TF_DerivedResultTypeAttr<0>;
10976}
10977
10978def TF_RandomGammaOp : TF_Op<"RandomGamma", [TF_CannotDuplicate]> {
10979  let summary = [{
10980Outputs random values from the Gamma distribution(s) described by alpha.
10981  }];
10982
10983  let description = [{
10984This op uses the algorithm by Marsaglia et al. to acquire samples via
10985transformation-rejection from pairs of uniform and normal random variables.
10986See http://dl.acm.org/citation.cfm?id=358414
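
For example (via `tf.random.gamma`, which wraps this op):

```python
import tensorflow as tf

# 10 independent samples from each of two Gamma distributions
# (alpha = 0.5 and alpha = 1.5), giving shape [10, 2].
samples = tf.random.gamma(shape=[10], alpha=[0.5, 1.5], seed=42)
print(samples.shape)  # (10, 2)
```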
10987  }];
10988
10989  let arguments = (ins
10990    Arg<TF_I32OrI64Tensor, [{1-D integer tensor. Shape of independent samples to draw from each
10991distribution described by the shape parameters given in alpha.}]>:$shape,
10992    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{A tensor in which each scalar is a "shape" parameter describing the
10993associated gamma distribution.}]>:$alpha,
10994
10995    DefaultValuedAttr<I64Attr, "0">:$seed,
10996    DefaultValuedAttr<I64Attr, "0">:$seed2
10997  );
10998
10999  let results = (outs
11000    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{A tensor with shape `shape + shape(alpha)`. Each slice
11001`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
11002`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.}]>:$output
11003  );
11004
11005  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
11006  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
11007}
11008
11009def TF_RandomGammaGradOp : TF_Op<"RandomGammaGrad", [NoSideEffect, ResultsBroadcastableShape]>,
11010                           WithBroadcastableBinOpBuilder {
11011  let summary = [{
11012Computes the derivative of a Gamma random sample w.r.t. `alpha`.
11013  }];
11014
11015  let arguments = (ins
11016    TF_F32OrF64Tensor:$alpha,
11017    TF_F32OrF64Tensor:$sample
11018  );
11019
11020  let results = (outs
11021    TF_F32OrF64Tensor:$output
11022  );
11023
11024  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11025}
11026
11027def TF_RandomPoissonOp : TF_Op<"RandomPoisson", [TF_CannotDuplicate]> {
11028  let summary = "Use RandomPoissonV2 instead.";
11029
11030  let arguments = (ins
11031    TF_I32OrI64Tensor:$shape,
11032    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$rate,
11033
11034    DefaultValuedAttr<I64Attr, "0">:$seed,
11035    DefaultValuedAttr<I64Attr, "0">:$seed2
11036  );
11037
11038  let results = (outs
11039    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$output
11040  );
11041
11042  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
11043  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
11044}
11045
11046def TF_RandomPoissonV2Op : TF_Op<"RandomPoissonV2", [TF_CannotDuplicate]> {
11047  let summary = [{
11048Outputs random values from the Poisson distribution(s) described by rate.
11049  }];
11050
11051  let description = [{
11052This op uses two algorithms, depending on rate. If rate >= 10, then
11053the algorithm by Hormann is used to acquire samples via
11054transformation-rejection.
11055See http://www.sciencedirect.com/science/article/pii/0167668793909974.
11056
11057Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
11058random variables.
11059See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
Programming, Volume 2. Addison-Wesley.
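
For example (via `tf.random.poisson`, which wraps this op):

```python
import tensorflow as tf

# 10 samples from each of two Poisson distributions (rates 1.0 and 5.0),
# giving shape [10, 2].
samples = tf.random.poisson(shape=[10], lam=[1.0, 5.0], seed=7)
print(samples.shape)  # (10, 2)
```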
11061  }];
11062
11063  let arguments = (ins
11064    Arg<TF_I32OrI64Tensor, [{1-D integer tensor. Shape of independent samples to draw from each
11065distribution described by the shape parameters given in rate.}]>:$shape,
11066    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A tensor in which each scalar is a "rate" parameter describing the
11067associated poisson distribution.}]>:$rate,
11068
11069    DefaultValuedAttr<I64Attr, "0">:$seed,
11070    DefaultValuedAttr<I64Attr, "0">:$seed2
11071  );
11072
11073  let results = (outs
11074    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A tensor with shape `shape + shape(rate)`. Each slice
11075`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
11076`rate[i0, i1, ...iN]`.}]>:$output
11077  );
11078
11079  TF_DerivedOperandTypeAttr R = TF_DerivedOperandTypeAttr<1>;
11080  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
11081  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
11082}
11083
11084def TF_RandomShuffleOp : TF_Op<"RandomShuffle", [TF_CannotDuplicate, TF_SameOperandsAndResultTypeResolveRef]> {
11085  let summary = "Randomly shuffles a tensor along its first dimension.";
11086
11087  let description = [{
11088The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
to one and only one `output[i]`. For example, a mapping that might occur for a
3x2 tensor is:
11091
11092```
11093[[1, 2],       [[5, 6],
11094 [3, 4],  ==>   [1, 2],
11095 [5, 6]]        [3, 4]]
11096```
11097  }];
11098
11099  let arguments = (ins
11100    Arg<TF_Tensor, [{The tensor to be shuffled.}]>:$value,
11101
11102    DefaultValuedAttr<I64Attr, "0">:$seed,
11103    DefaultValuedAttr<I64Attr, "0">:$seed2
11104  );
11105
11106  let results = (outs
11107    Res<TF_Tensor, [{A tensor of same shape and type as `value`, shuffled along its first
11108dimension.}]>:$output
11109  );
11110
11111  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11112}
11113
11114def TF_RandomStandardNormalOp : TF_Op<"RandomStandardNormal", [TF_CannotDuplicate]> {
11115  let summary = "Outputs random values from a normal distribution.";
11116
11117  let description = [{
11118The generated values will have mean 0 and standard deviation 1.
11119  }];
11120
11121  let arguments = (ins
11122    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
11123
11124    DefaultValuedAttr<I64Attr, "0">:$seed,
11125    DefaultValuedAttr<I64Attr, "0">:$seed2
11126  );
11127
11128  let results = (outs
11129    Res<TF_FloatTensor, [{A tensor of the specified shape filled with random normal values.}]>:$output
11130  );
11131
11132  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11133  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
11134}
11135
11136def TF_RandomUniformOp : TF_Op<"RandomUniform", [TF_CannotDuplicate]> {
11137  let summary = "Outputs random values from a uniform distribution.";
11138
11139  let description = [{
11140The generated values follow a uniform distribution in the range `[0, 1)`. The
11141lower bound 0 is included in the range, while the upper bound 1 is excluded.
11142  }];
11143
11144  let arguments = (ins
11145    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
11146
11147    DefaultValuedAttr<I64Attr, "0">:$seed,
11148    DefaultValuedAttr<I64Attr, "0">:$seed2
11149  );
11150
11151  let results = (outs
11152    Res<TF_FloatTensor, [{A tensor of the specified shape filled with uniform random values.}]>:$output
11153  );
11154
11155  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11156  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
11157
11158  let verifier = [{
11159    return Verify(*this);
11160  }];
11161}
11162
11163def TF_RandomUniformIntOp : TF_Op<"RandomUniformInt", [TF_CannotDuplicate]> {
11164  let summary = "Outputs random integers from a uniform distribution.";
11165
11166  let description = [{
11167The generated values are uniform integers in the range `[minval, maxval)`.
11168The lower bound `minval` is included in the range, while the upper bound
11169`maxval` is excluded.
11170
11171The random integers are slightly biased unless `maxval - minval` is an exact
11172power of two.  The bias is small for values of `maxval - minval` significantly
11173smaller than the range of the output (either `2^32` or `2^64`).
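
For example (via `tf.random.uniform` with an integer dtype, which lowers to
this op when both `minval` and `maxval` are given):

```python
import tensorflow as tf

# 4 integers drawn uniformly from [0, 10).
print(tf.random.uniform(shape=[4], minval=0, maxval=10, dtype=tf.int32, seed=1))
```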
11174  }];
11175
11176  let arguments = (ins
11177    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
11178    Arg<TF_I32OrI64Tensor, [{0-D.  Inclusive lower bound on the generated integers.}]>:$minval,
11179    Arg<TF_I32OrI64Tensor, [{0-D.  Exclusive upper bound on the generated integers.}]>:$maxval,
11180
11181    DefaultValuedAttr<I64Attr, "0">:$seed,
11182    DefaultValuedAttr<I64Attr, "0">:$seed2
11183  );
11184
11185  let results = (outs
11186    Res<TF_I32OrI64Tensor, [{A tensor of the specified shape filled with uniform random integers.}]>:$output
11187  );
11188
11189  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11190  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<1>;
11191}
11192
11193def TF_RangeOp : TF_Op<"Range", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
11194  let summary = "Creates a sequence of numbers.";
11195
11196  let description = [{
11197This operation creates a sequence of numbers that begins at `start` and
11198extends by increments of `delta` up to but not including `limit`.
11199
11200For example:
11201
11202```
11203# 'start' is 3
11204# 'limit' is 18
11205# 'delta' is 3
11206tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
11207```
11208  }];
11209
11210  let arguments = (ins
11211    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint32]>, [{0-D (scalar). First entry in the sequence.}]>:$start,
11212    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint32]>, [{0-D (scalar). Upper limit of sequence, exclusive.}]>:$limit,
11213    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint32]>, [{0-D (scalar). Optional. Default is 1. Number that increments `start`.}]>:$delta
11214  );
11215
11216  let results = (outs
11217    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint32]>, [{1-D.}]>:$output
11218  );
11219
11220  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<0>;
11221
11222  let builders = [
11223    OpBuilder<(ins "Value":$start, "Value":$limit, "Value":$delta)>
11224  ];
11225
11226  let hasFolder = 1;
}
11229
11230def TF_RangeDatasetOp : TF_Op<"RangeDataset", [NoSideEffect, TF_NoConstantFold]> {
11231  let summary = [{
Creates a dataset with a range of values. Corresponds to Python's `xrange`.
11233  }];
11234
11235  let arguments = (ins
11236    Arg<TF_Int64Tensor, [{corresponds to start in python's xrange().}]>:$start,
11237    Arg<TF_Int64Tensor, [{corresponds to stop in python's xrange().}]>:$stop,
11238    Arg<TF_Int64Tensor, [{corresponds to step in python's xrange().}]>:$step,
11239
11240    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
11241    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
11242  );
11243
11244  let results = (outs
11245    TF_VariantTensor:$handle
11246  );
11247}
11248
11249def TF_RankOp : TF_Op<"Rank", [NoSideEffect]> {
11250  let summary = "Returns the rank of a tensor.";
11251
11252  let description = [{
11253This operation returns an integer representing the rank of `input`.
11254
11255For example:
11256
11257```
11258# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
11259# shape of tensor 't' is [2, 2, 3]
11260rank(t) ==> 3
11261```
11262
11263**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
11264of a tensor is the number of indices required to uniquely select each element
11265of the tensor. Rank is also known as "order", "degree", or "ndims."
11266  }];
11267
11268  let arguments = (ins
11269    TF_Tensor:$input
11270  );
11271
11272  let results = (outs
11273    TF_Int32Tensor:$output
11274  );
11275
11276  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11277
11278  let builders = [
11279    OpBuilder<(ins "Value":$input)>
11280  ];
11281
11282  let hasFolder = 1;
11283}
11284
11285def TF_ReadVariableOp : TF_Op<"ReadVariableOp", []> {
11286  let summary = "Reads the value of a variable.";
11287
11288  let description = [{
11289The tensor returned by this operation is immutable.
11290
11291The value returned by this operation is guaranteed to be influenced by all the
11292writes on which this operation depends directly or indirectly, and to not be
11293influenced by any of the writes which depend directly or indirectly on this
11294operation.
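
A hedged illustration using the public variable API (reads of a `tf.Variable`
go through this op):

```python
import tensorflow as tf

v = tf.Variable(3.0)
snapshot = v.read_value()   # emits a ReadVariableOp
v.assign_add(1.0)
# The earlier read is an immutable snapshot, unaffected by the later write.
print(snapshot.numpy(), v.read_value().numpy())  # 3.0 4.0
```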
11295  }];
11296
11297  let arguments = (ins
11298    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead]>:$resource
11299  );
11300
11301  let results = (outs
11302    TF_Tensor:$value
11303  );
11304
11305  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
11306
11307  let hasCanonicalizer = 1;
11308}
11309
11310def TF_RealOp : TF_Op<"Real", [NoSideEffect, SameOperandsAndResultShape]> {
11311  let summary = "Returns the real part of a complex number.";
11312
11313  let description = [{
11314Given a tensor `input` of complex numbers, this operation returns a tensor of
11315type `float` that is the real part of each element in `input`. All elements in
11316`input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
part returned by this operation and *b* is the imaginary part.
11318
11319For example:
11320
11321```
11322# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
11323tf.real(input) ==> [-2.25, 3.25]
11324```
11325  }];
11326
11327  let arguments = (ins
11328    TensorOf<[TF_Complex128, TF_Complex64]>:$input
11329  );
11330
11331  let results = (outs
11332    TF_F32OrF64Tensor:$output
11333  );
11334
11335  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11336  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
11337}
11338
11339def TF_RealDivOp : TF_Op<"RealDiv", [NoSideEffect, ResultsBroadcastableShape, TF_CwiseBinary]>,
11340                   WithBroadcastableBinOpBuilder {
11341  let summary = "Returns x / y element-wise for real types.";
11342
11343  let description = [{
11344If `x` and `y` are reals, this will return the floating-point division.
11345
*NOTE*: `RealDiv` supports broadcasting. More about broadcasting
11347[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
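
For example (via `tf.realdiv`):

```python
import tensorflow as tf

x = tf.constant([7.0, 1.0])
y = tf.constant([2.0, 4.0])
print(tf.realdiv(x, y).numpy())  # [3.5, 0.25]
```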
11348  }];
11349
11350  let arguments = (ins
11351    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
11352    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
11353  );
11354
11355  let results = (outs
11356    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
11357  );
11358
11359  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11360
11361  let hasCanonicalizer = 1;
11362
11363  let hasFolder = 1;
11364}
11365
11366def TF_ReciprocalOp : TF_Op<"Reciprocal", [Involution, NoSideEffect, SameOperandsAndResultType]> {
11367  let summary = "Computes the reciprocal of x element-wise.";
11368
11369  let description = [{
11370I.e., \\(y = 1 / x\\).
11371  }];
11372
11373  let arguments = (ins
11374    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
11375  );
11376
11377  let results = (outs
11378    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
11379  );
11380
11381  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11382}
11383
11384def TF_ReciprocalGradOp : TF_Op<"ReciprocalGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
11385  let summary = "Computes the gradient for the inverse of `x` wrt its input.";
11386
11387  let description = [{
11388Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
11389is the corresponding input gradient.
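
A quick numeric check of the formula (a sketch, not the registered kernel):

```python
import tensorflow as tf

x = tf.constant(2.0)
with tf.GradientTape() as tape:
  tape.watch(x)
  y = tf.math.reciprocal(x)         # y = 0.5
# With dy = 1: grad = -dy * y * y = -0.25.
print(tape.gradient(y, x).numpy())  # -0.25
```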
11390  }];
11391
11392  let arguments = (ins
11393    TF_FpOrComplexTensor:$y,
11394    TF_FpOrComplexTensor:$dy
11395  );
11396
11397  let results = (outs
11398    TF_FpOrComplexTensor:$z
11399  );
11400
11401  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11402}
11403
11404def TF_RecvOp : TF_Op<"Recv", []> {
11405  let summary = "Receives the named tensor from send_device on recv_device.";
11406
11407  let arguments = (ins
11408    StrAttr:$tensor_name,
11409    StrAttr:$send_device,
11410    I64Attr:$send_device_incarnation,
11411    StrAttr:$recv_device,
11412    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
11413  );
11414
11415  let results = (outs
11416    Res<TF_Tensor, [{The tensor to receive.}]>:$tensor
11417  );
11418
11419  TF_DerivedResultTypeAttr tensor_type = TF_DerivedResultTypeAttr<0>;
11420}
11421
11422def TF_RecvTPUEmbeddingActivationsOp : TF_Op<"RecvTPUEmbeddingActivations", [TF_TPUEmbeddingSideEffect]> {
11423  let summary = "An op that receives embedding activations on the TPU.";
11424
11425  let description = [{
11426The TPU system performs the embedding lookups and aggregations specified by
11427the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The
results of these aggregations are visible to the TensorFlow graph as the
11429outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing
11430one Tensor of activations per table specified in the model. There can be at
11431most one RecvTPUEmbeddingActivations op in the TPU graph.
11432  }];
11433
11434  let arguments = (ins
11435    StrAttr:$config
11436  );
11437
11438  let results = (outs
11439    Res<Variadic<TF_Float32Tensor>, [{A TensorList of embedding activations containing one Tensor per
11440embedding table in the model.}]>:$outputs
11441  );
11442
11443  TF_DerivedResultSizeAttr num_outputs = TF_DerivedResultSizeAttr<0>;
11444}
11445
11446def TF_ReduceJoinOp : TF_Op<"ReduceJoin", [NoSideEffect]> {
11447  let summary = "Joins a string Tensor across the given dimensions.";
11448
11449  let description = [{
11450Computes the string join across dimensions in the given string Tensor of shape
11451`[\\(d_0, d_1, ..., d_{n-1}\\)]`.  Returns a new Tensor created by joining the input
11452strings with the given separator (default: empty string).  Negative indices are
11453counted backwards from the end, with `-1` being equivalent to `n - 1`.  If
11454indices are not specified, joins across all dimensions beginning from `n - 1`
11455through `0`.
11456
11457For example:
11458
11459```python
11460# tensor `a` is [["a", "b"], ["c", "d"]]
11461tf.reduce_join(a, 0) ==> ["ac", "bd"]
11462tf.reduce_join(a, 1) ==> ["ab", "cd"]
11463tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
11464tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
11465tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
11466tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
11467tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
11468tf.reduce_join(a, [0, 1]) ==> "acbd"
11469tf.reduce_join(a, [1, 0]) ==> "abcd"
11470tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
11471tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
11472```
11473  }];
11474
11475  let arguments = (ins
11476    Arg<TF_StrTensor, [{The input to be joined.  All reduced indices must have non-zero size.}]>:$inputs,
11477    Arg<TF_Int32Tensor, [{The dimensions to reduce over.  Dimensions are reduced in the
11478order specified.  Omitting `reduction_indices` is equivalent to passing
11479`[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.}]>:$reduction_indices,
11480
11481    DefaultValuedAttr<BoolAttr, "false">:$keep_dims,
11482    DefaultValuedAttr<StrAttr, "">:$separator
11483  );
11484
11485  let results = (outs
11486    Res<TF_StrTensor, [{Has shape equal to that of the input with reduced dimensions removed or
11487set to `1` depending on `keep_dims`.}]>:$output
11488  );
11489}
11490
11491def TF_ReluOp : TF_Op<"Relu", [Idempotent, NoSideEffect, SameOperandsAndResultType, TF_LayoutAgnostic]> {
11492  let summary = "Computes rectified linear: `max(features, 0)`.";
11493
11494  let description = [{
See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)

Example usage:

>>> tf.nn.relu([-2., 0., 3.]).numpy()
array([0., 0., 3.], dtype=float32)
11499  }];
11500
11501  let arguments = (ins
11502    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$features
11503  );
11504
11505  let results = (outs
11506    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$activations
11507  );
11508
11509  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11510}
11511
11512def TF_Relu6Op : TF_Op<"Relu6", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
11513  let summary = "Computes rectified linear 6: `min(max(features, 0), 6)`.";
11514
11515  let arguments = (ins
11516    TF_IntOrFpTensor:$features
11517  );
11518
11519  let results = (outs
11520    TF_IntOrFpTensor:$activations
11521  );
11522
11523  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11524}
11525
11526def TF_Relu6GradOp : TF_Op<"Relu6Grad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
11527  let summary = "Computes rectified linear 6 gradients for a Relu6 operation.";
11528
11529  let arguments = (ins
11530    Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu6 operation.}]>:$gradients,
11531    Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu6 operation, or
11532its output; using either one produces the same result.}]>:$features
11533  );
11534
11535  let results = (outs
11536    Res<TF_IntOrFpTensor, [{The gradients:
11537`gradients * (features > 0) * (features < 6)`.}]>:$backprops
11538  );
11539
11540  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11541}
11542
11543def TF_ReluGradOp : TF_Op<"ReluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
11544  let summary = "Computes rectified linear gradients for a Relu operation.";
11545
11546  let arguments = (ins
11547    Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu operation.}]>:$gradients,
11548    Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu operation, OR
11549the outputs of that operation (both work equivalently).}]>:$features
11550  );
11551
11552  let results = (outs
11553    Res<TF_IntOrFpTensor, [{`gradients * (features > 0)`.}]>:$backprops
11554  );
11555
11556  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11557}
11558
11559def TF_RemoteCallOp : TF_Op<"RemoteCall", []> {
11560  let summary = "Runs function `f` on a remote device indicated by `target`.";
11561
11562  let arguments = (ins
11563    Arg<TF_StrTensor, [{A fully specified device name where we want to run the function.}]>:$target,
11564    Arg<Variadic<TF_Tensor>, [{A list of arguments for the function.}]>:$args,
11565
11566    SymbolRefAttr:$f
11567  );
11568
11569  let results = (outs
11570    Res<Variadic<TF_Tensor>, [{A list of return values.}]>:$output
11571  );
11572
11573  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<1>;
11574  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
11575}
11576
11577def TF_RepeatDatasetOp : TF_Op<"RepeatDataset", [NoSideEffect]> {
11578  let summary = [{
11579Creates a dataset that emits the outputs of `input_dataset` `count` times.
11580  }];
11581
11582  let arguments = (ins
11583    TF_VariantTensor:$input_dataset,
11584    Arg<TF_Int64Tensor, [{A scalar representing the number of times that `input_dataset` should
11585be repeated. A value of `-1` indicates that it should be repeated infinitely.}]>:$count,
11586
11587    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
11588    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
11589  );
11590
11591  let results = (outs
11592    TF_VariantTensor:$handle
11593  );
11594}
11595
11596def TF_ReshapeOp : TF_Op<"Reshape", [NoSideEffect]> {
11597  let summary = "Reshapes a tensor.";
11598
11599  let description = [{
11600Given `tensor`, this operation returns a tensor that has the same values
11601as `tensor` with shape `shape`.
11602
11603If one component of 1-D tensor `shape` is the special value -1, the size of that
11604dimension is computed so that the total size remains constant.  In particular, a
11605`shape` of `[-1]` flattens into 1-D.  At most one component of `shape` may be
11606unknown.
11607
11608The `shape` must be 1-D and the operation returns a tensor with shape
11609`shape` filled with the values of `tensor`. In this case, the number of elements
11610implied by `shape` must be the same as the number of elements in `tensor`.
11611
11612It is an error if `shape` is not 1-D.
11613
11614For example:
11615
11616```
11617# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
11618# tensor 't' has shape [9]
11619reshape(t, [3, 3]) ==> [[1, 2, 3],
11620                        [4, 5, 6],
11621                        [7, 8, 9]]
11622
11623# tensor 't' is [[[1, 1], [2, 2]],
11624#                [[3, 3], [4, 4]]]
11625# tensor 't' has shape [2, 2, 2]
11626reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
11627                        [3, 3, 4, 4]]
11628
11629# tensor 't' is [[[1, 1, 1],
11630#                 [2, 2, 2]],
11631#                [[3, 3, 3],
11632#                 [4, 4, 4]],
11633#                [[5, 5, 5],
11634#                 [6, 6, 6]]]
11635# tensor 't' has shape [3, 2, 3]
11636# pass '[-1]' to flatten 't'
11637reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
11638
11639# -1 can also be used to infer the shape
11640
11641# -1 is inferred to be 9:
11642reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
11643                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
11644# -1 is inferred to be 2:
11645reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
11646                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
11647# -1 is inferred to be 3:
11648reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
11649                              [2, 2, 2],
11650                              [3, 3, 3]],
11651                             [[4, 4, 4],
11652                              [5, 5, 5],
11653                              [6, 6, 6]]]
11654
11655# tensor 't' is [7]
11656# shape `[]` reshapes to a scalar
11657reshape(t, []) ==> 7
11658```
11659  }];
11660
11661  let arguments = (ins
11662    TF_Tensor:$tensor,
11663    Arg<TF_I32OrI64Tensor, [{Defines the shape of the output tensor.}]>:$shape
11664  );
11665
11666  let results = (outs
11667    TF_Tensor:$output
11668  );
11669
11670  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11671  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<1>;
11672
11673  let builders = [
11674    OpBuilder<(ins "Value":$tensor, "Value":$shape)>
11675  ];
11676
11677  let verifier = [{
11678    return Verify(*this);
11679  }];
11680
11681  let hasCanonicalizer = 1;
11682  let hasFolder = 1;
11683}
11684
11685def TF_ResizeBilinearOp : TF_Op<"ResizeBilinear", [NoSideEffect]> {
11686  let summary = "Resize `images` to `size` using bilinear interpolation.";
11687
11688  let description = [{
11689Input images can be of different types but output images are always float.
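
A hedged usage sketch with the public resize API (`tf.image.resize` with
`method='bilinear'` commonly lowers to this op; the exact lowering depends on
the TensorFlow version and the `antialias` setting):

```python
import tensorflow as tf

images = tf.reshape(tf.range(16.0), [1, 4, 4, 1])  # [batch, h, w, channels]
resized = tf.image.resize(images, size=[2, 2], method='bilinear')
print(resized.shape)  # (1, 2, 2, 1)
```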
11690  }];
11691
11692  let arguments = (ins
11693    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$images,
    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
11695new size for the images.}]>:$size,
11696
11697    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
11698    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
11699  );
11700
11701  let results = (outs
11702    Res<TF_Float32Tensor, [{4-D with shape
11703`[batch, new_height, new_width, channels]`.}]>:$resized_images
11704  );
11705
11706  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11707}
11708
11709def TF_ResizeBilinearGradOp : TF_Op<"ResizeBilinearGrad", [NoSideEffect]> {
11710  let summary = "Computes the gradient of bilinear interpolation.";
11711
11712  let arguments = (ins
11713    Arg<TF_Float32Tensor, [{4-D with shape `[batch, height, width, channels]`.}]>:$grads,
    Arg<TF_FloatTensor, [{4-D with shape `[batch, orig_height, orig_width, channels]`,
the image tensor that was resized.}]>:$original_image,
11716
11717    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
11718    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
11719  );
11720
11721  let results = (outs
11722    Res<TF_FloatTensor, [{4-D with shape `[batch, orig_height, orig_width, channels]`.
11723Gradients with respect to the input image. Input image must have been
11724float or double.}]>:$output
11725  );
11726
11727  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
11728}
11729
11730def TF_ResizeNearestNeighborOp : TF_Op<"ResizeNearestNeighbor", [NoSideEffect]> {
11731  let summary = [{
11732Resize `images` to `size` using nearest neighbor interpolation.
11733  }];
11734
11735  let arguments = (ins
11736    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$images,
    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
11738new size for the images.}]>:$size,
11739
11740    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
11741    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
11742  );
11743
11744  let results = (outs
11745    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape
11746`[batch, new_height, new_width, channels]`.}]>:$resized_images
11747  );
11748
11749  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11750}
11751
11752def TF_ResizeNearestNeighborGradOp : TF_Op<"ResizeNearestNeighborGrad", [NoSideEffect]> {
11753  let summary = "Computes the gradient of nearest neighbor interpolation.";
11754
11755  let arguments = (ins
11756    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int8, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$grads,
    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
11758original input size.}]>:$size,
11759
11760    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
11761    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
11762  );
11763
11764  let results = (outs
11765    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int8, TF_Uint8]>, [{4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
11766with respect to the input image.}]>:$output
11767  );
11768
11769  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
11770}
11771
11772def TF_ResourceApplyAdaMaxOp : TF_Op<"ResourceApplyAdaMax", []> {
11773  let summary = "Update '*var' according to the AdaMax algorithm.";
11774
11775  let description = [{
11776m_t <- beta1 * m_{t-1} + (1 - beta1) * g
11777v_t <- max(beta2 * v_{t-1}, abs(g))
11778variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
11779  }];
11780
11781  let arguments = (ins
11782    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11783    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
11784    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$v,
11785    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta1_power,
11786    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11787    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta1,
11788    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta2,
11789    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
11790    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11791
11792    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11793  );
11794
11795  let results = (outs);
11796
11797  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
11798}
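
// Illustrative only: a minimal NumPy sketch of the AdaMax update documented
// above (hypothetical helper; slot variables are updated in place).
//
//   import numpy as np
//
//   def apply_ada_max(var, m, v, beta1_power, lr, beta1, beta2, epsilon, grad):
//       m[:] = beta1 * m + (1 - beta1) * grad
//       v[:] = np.maximum(beta2 * v, np.abs(grad))
//       var[:] = var - lr / (1 - beta1_power) * m / (v + epsilon)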
11799
11800def TF_ResourceApplyAdadeltaOp : TF_Op<"ResourceApplyAdadelta", []> {
11801  let summary = "Update '*var' according to the adadelta scheme.";
11802
11803  let description = [{
11804accum = rho * accum + (1 - rho) * grad.square();
11805update = (update_accum + epsilon).sqrt() * (accum + epsilon).rsqrt() * grad;
11806update_accum = rho * update_accum + (1 - rho) * update.square();
11807var -= lr * update;
11808  }];
11809
11810  let arguments = (ins
11811    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11812    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11813    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum_update,
11814    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11815    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay factor. Must be a scalar.}]>:$rho,
11816    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Constant factor. Must be a scalar.}]>:$epsilon,
11817    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11818
11819    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11820  );
11821
11822  let results = (outs);
11823
11824  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
11825}
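
// Illustrative only: the Adadelta update above as a NumPy sketch (hypothetical
// helper; note the final step scales the update by `lr`, matching the kernel).
//
//   import numpy as np
//
//   def apply_adadelta(var, accum, accum_update, lr, rho, epsilon, grad):
//       accum[:] = rho * accum + (1 - rho) * grad**2
//       update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * grad
//       accum_update[:] = rho * accum_update + (1 - rho) * update**2
//       var[:] = var - lr * update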
11826
11827def TF_ResourceApplyAdagradOp : TF_Op<"ResourceApplyAdagrad", []> {
11828  let summary = "Update '*var' according to the adagrad scheme.";
11829
11830  let description = [{
11831accum += grad * grad
11832var -= lr * grad * (1 / sqrt(accum))
11833  }];
11834
11835  let arguments = (ins
11836    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11837    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11838    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11839    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11840
11841    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11842    DefaultValuedAttr<BoolAttr, "true">:$update_slots
11843  );
11844
11845  let results = (outs);
11846
11847  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11848}
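
// Illustrative only: the Adagrad step above in NumPy (hypothetical helper).
// The V2 variant (`ResourceApplyAdagradV2`, below) differs only by adding
// `epsilon` to the denominator: grad / (sqrt(accum) + epsilon).
//
//   import numpy as np
//
//   def apply_adagrad(var, accum, lr, grad, update_slots=True):
//       if update_slots:
//           accum[:] = accum + grad**2
//       var[:] = var - lr * grad / np.sqrt(accum)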
11849
11850def TF_ResourceApplyAdagradDAOp : TF_Op<"ResourceApplyAdagradDA", []> {
11851  let summary = "Update '*var' according to the proximal adagrad scheme.";
11852
11853  let arguments = (ins
11854    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11855    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$gradient_accumulator,
11856    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$gradient_squared_accumulator,
11857    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11858    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11859    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
11860    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
11861    Arg<TF_Int64Tensor, [{Training step number. Must be a scalar.}]>:$global_step,
11862
11863    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11864  );
11865
11866  let results = (outs);
11867
11868  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
11869}
11870
11871def TF_ResourceApplyAdagradV2Op : TF_Op<"ResourceApplyAdagradV2", []> {
11872  let summary = "Update '*var' according to the adagrad scheme.";
11873
11874  let description = [{
11875accum += grad * grad
11876var -= lr * grad * (1 / (sqrt(accum) + epsilon))
11877  }];
11878
11879  let arguments = (ins
11880    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11881    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11882    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11883    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Constant factor. Must be a scalar.}]>:$epsilon,
11884    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11885
11886    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11887    DefaultValuedAttr<BoolAttr, "true">:$update_slots
11888  );
11889
11890  let results = (outs);
11891
11892  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11893}
11894
11895def TF_ResourceApplyAdamOp : TF_Op<"ResourceApplyAdam", []> {
11896  let summary = "Update '*var' according to the Adam algorithm.";
11897
11898  let description = [{
11899$$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$
11900$$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$
11901$$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$
11902$$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\\\  \text{var} - m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$
11903  }];
11904
11905  let arguments = (ins
11906    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11907    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
11908    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$v,
11909    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta1_power,
11910    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta2_power,
11911    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11912    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta1,
11913    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta2,
11914    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
11915    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11916
11917    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11918    DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
11919  );
11920
11921  let results = (outs);
11922
11923  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
11924}
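
// Illustrative only: a NumPy sketch of the Adam step documented above
// (hypothetical helper; `beta1_power`/`beta2_power` stand for beta1^t and
// beta2^t).
//
//   import numpy as np
//
//   def apply_adam(var, m, v, beta1_power, beta2_power, lr, beta1, beta2,
//                  epsilon, grad, use_nesterov=False):
//       lr_t = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
//       m[:] = beta1 * m + (1 - beta1) * grad
//       v[:] = beta2 * v + (1 - beta2) * grad**2
//       step = beta1 * m + (1 - beta1) * grad if use_nesterov else m
//       var[:] = var - lr_t * step / (np.sqrt(v) + epsilon)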
11925
11926def TF_ResourceApplyAddSignOp : TF_Op<"ResourceApplyAddSign", []> {
11927  let summary = "Update '*var' according to the AddSign update.";
11928
11929  let description = [{
11930m_t <- beta1 * m_{t-1} + (1 - beta1) * g
11931update <- (alpha + sign_decay * sign(g) * sign(m)) * g
11932variable <- variable - lr_t * update
11933  }];
11934
11935  let arguments = (ins
11936    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11937    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
11938    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11939    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$alpha,
11940    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$sign_decay,
11941    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta,
11942    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11943
11944    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11945  );
11946
11947  let results = (outs);
11948
11949  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11950}
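
// Illustrative only: the AddSign update above in NumPy (hypothetical helper;
// the scalar the description calls beta1 is the op's `beta` operand).
//
//   import numpy as np
//
//   def apply_add_sign(var, m, lr, alpha, sign_decay, beta, grad):
//       m[:] = beta * m + (1 - beta) * grad
//       update = (alpha + sign_decay * np.sign(grad) * np.sign(m)) * grad
//       var[:] = var - lr * update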
11951
11952def TF_ResourceApplyCenteredRMSPropOp : TF_Op<"ResourceApplyCenteredRMSProp", []> {
11953  let summary = "Update '*var' according to the centered RMSProp algorithm.";
11954
11955  let description = [{
11956The centered RMSProp algorithm uses an estimate of the centered second moment
11957(i.e., the variance) for normalization, as opposed to regular RMSProp, which
11958uses the (uncentered) second moment. This often helps with training, but is
11959slightly more expensive in terms of computation and memory.
11960
11961Note that in the dense implementation of this algorithm, mg, ms, and mom will
11962update even if the grad is zero, but in the sparse implementation, mg, ms,
11963and mom will not update in iterations during which the grad is zero.
11964
11965mean_square = decay * mean_square + (1-decay) * gradient ** 2
11966mean_grad = decay * mean_grad + (1-decay) * gradient
11967
11968Delta = learning_rate * gradient / sqrt(mean_square - mean_grad ** 2 + epsilon)
11969
11970mg <- rho * mg_{t-1} + (1-rho) * grad
11971ms <- rho * ms_{t-1} + (1-rho) * grad * grad
11972mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
11973var <- var - mom
11974  }];
11975
11976  let arguments = (ins
11977    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11978    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mg,
11979    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$ms,
11980    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mom,
11981    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11982    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay rate. Must be a scalar.}]>:$rho,
11983    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum scale. Must be a scalar.}]>:$momentum,
11984    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
11985    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11986
11987    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11988  );
11989
11990  let results = (outs);
11991
11992  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<4>;
11993}
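
// Illustrative only: the centered RMSProp update above as a NumPy sketch
// (hypothetical helper; `mg` is the first-moment estimate used for centering).
//
//   import numpy as np
//
//   def apply_centered_rms_prop(var, mg, ms, mom, lr, rho, momentum, epsilon, grad):
//       mg[:] = rho * mg + (1 - rho) * grad
//       ms[:] = rho * ms + (1 - rho) * grad**2
//       mom[:] = momentum * mom + lr * grad / np.sqrt(ms - mg**2 + epsilon)
//       var[:] = var - mom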
11994
11995def TF_ResourceApplyFtrlOp : TF_Op<"ResourceApplyFtrl", []> {
11996  let summary = "Update '*var' according to the Ftrl-proximal scheme.";
11997
11998  let description = [{
11999accum_new = accum + grad * grad
12000linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
12001quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
12002var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
12003accum = accum_new
12004  }];
12005
12006  let arguments = (ins
12007    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
12008    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
12009    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$linear,
12010    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
12011    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
12012    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
12013    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
12014    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr_power,
12015
12016    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
12017    DefaultValuedAttr<BoolAttr, "false">:$multiply_linear_by_lr
12018  );
12019
12020  let results = (outs);
12021
12022  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
12023}
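
// Illustrative only: the Ftrl-proximal step above in NumPy (hypothetical
// helper). The V2 variant below is identical except that
// grad_with_shrinkage = grad + 2 * l2_shrinkage * var replaces grad in the
// `linear` update.
//
//   import numpy as np
//
//   def apply_ftrl(var, accum, linear, grad, lr, l1, l2, lr_power):
//       accum_new = accum + grad**2
//       linear[:] = linear + grad - (accum_new**(-lr_power) - accum**(-lr_power)) / lr * var
//       quadratic = 1.0 / (accum_new**lr_power * lr) + 2 * l2
//       var[:] = np.where(np.abs(linear) > l1,
//                         (np.sign(linear) * l1 - linear) / quadratic, 0.0)
//       accum[:] = accum_new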
12024
12025def TF_ResourceApplyFtrlV2Op : TF_Op<"ResourceApplyFtrlV2", []> {
12026  let summary = "Update '*var' according to the Ftrl-proximal scheme.";
12027
12028  let description = [{
12029accum_new = accum + grad * grad
12030grad_with_shrinkage = grad + 2 * l2_shrinkage * var
12031linear += grad_with_shrinkage -
12032    (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
12033quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
12034var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
12035accum = accum_new
12036  }];
12037
12038  let arguments = (ins
12039    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
12040    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
12041    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$linear,
12042    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
12043    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
12044    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
12045    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
12046    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 shrinkage regularization. Must be a scalar.}]>:$l2_shrinkage,
12047    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr_power,
12048
12049    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
12050    DefaultValuedAttr<BoolAttr, "false">:$multiply_linear_by_lr
12051  );
12052
12053  let results = (outs);
12054
12055  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
12056}
12057
12058def TF_ResourceApplyGradientDescentOp : TF_Op<"ResourceApplyGradientDescent", []> {
12059  let summary = "Update '*var' by subtracting 'alpha' * 'delta' from it.";
12060
12061  let arguments = (ins
12062    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
12063    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$alpha,
12064    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The change.}]>:$delta,
12065
12066    DefaultValuedAttr<BoolAttr, "false">:$use_locking
12067  );
12068
12069  let results = (outs);
12070
12071  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
12072}
12073
12074def TF_ResourceApplyKerasMomentumOp : TF_Op<"ResourceApplyKerasMomentum", []> {
12075  let summary = "Update '*var' according to the momentum scheme.";
12076
12077  let description = [{
12078Set use_nesterov = True if you want to use Nesterov momentum.
12079
12080accum = accum * momentum - lr * grad
12081var += accum
12082  }];
12083
12084  let arguments = (ins
12085    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
12086    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
12087    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
12088    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
12089    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum. Must be a scalar.}]>:$momentum,
12090
12091    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
12092    DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
12093  );
12094
12095  let results = (outs);
12096
12097  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
12098}
12099
12100def TF_ResourceApplyMomentumOp : TF_Op<"ResourceApplyMomentum", []> {
12101  let summary = "Update '*var' according to the momentum scheme.";
12102
12103  let description = [{
12104Set use_nesterov = True if you want to use Nesterov momentum.
12105
12106accum = accum * momentum + grad
12107var -= lr * accum
12108  }];
12109
12110  let arguments = (ins
12111    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
12112    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
12113    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
12114    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
12115    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum. Must be a scalar.}]>:$momentum,
12116
12117    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
12118    DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
12119  );
12120
12121  let results = (outs);
12122
12123  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
12124}
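
// Illustrative only: the two momentum ops above differ in how the `accum`
// slot folds in the learning rate (hypothetical helpers operating on NumPy
// arrays; the use_nesterov variants change only the applied step and are
// omitted here).
//
//   def apply_keras_momentum(var, accum, lr, grad, momentum):
//       accum[:] = momentum * accum - lr * grad
//       var[:] = var + accum
//
//   def apply_momentum(var, accum, lr, grad, momentum):
//       accum[:] = momentum * accum + grad
//       var[:] = var - lr * accum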
12125
12126def TF_ResourceApplyPowerSignOp : TF_Op<"ResourceApplyPowerSign", []> {
12127  let summary = "Update '*var' according to the PowerSign update.";
12128
12129  let description = [{
12130m_t <- beta1 * m_{t-1} + (1 - beta1) * g
12131update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
12132variable <- variable - lr_t * update
12133  }];
12134
12135  let arguments = (ins
12136    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
12137    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
12138    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
12139    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$logbase,
12140    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$sign_decay,
12141    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta,
12142    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
12143
12144    DefaultValuedAttr<BoolAttr, "false">:$use_locking
12145  );
12146
12147  let results = (outs);
12148
12149  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
12150}
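
// Illustrative only: the PowerSign update above in NumPy (hypothetical helper;
// as with AddSign, the description's beta1 is the op's `beta` operand).
//
//   import numpy as np
//
//   def apply_power_sign(var, m, lr, logbase, sign_decay, beta, grad):
//       m[:] = beta * m + (1 - beta) * grad
//       update = np.exp(logbase * sign_decay * np.sign(grad) * np.sign(m)) * grad
//       var[:] = var - lr * update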
12151
12152def TF_ResourceApplyProximalAdagradOp : TF_Op<"ResourceApplyProximalAdagrad", []> {
12153  let summary = [{
12154Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
12155  }];
12156
12157  let description = [{
12158accum += grad * grad
12159prox_v = var - lr * grad * (1 / sqrt(accum))
12160var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
12161  }];
12162
12163  let arguments = (ins
12164    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
12165    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
12166    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
12167    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
12168    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
12169    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
12170
12171    DefaultValuedAttr<BoolAttr, "false">:$use_locking
12172  );
12173
12174  let results = (outs);
12175
12176  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
12177}
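
// Illustrative only: the proximal-Adagrad step above in NumPy, following the
// documented formulas (hypothetical helper).
// `ResourceApplyProximalGradientDescent` below applies the same
// soft-threshold (FOBOS) step with a fixed rate `alpha` in place of the
// Adagrad-scaled rate.
//
//   import numpy as np
//
//   def apply_proximal_adagrad(var, accum, lr, l1, l2, grad):
//       accum[:] = accum + grad**2
//       prox_v = var - lr * grad / np.sqrt(accum)
//       var[:] = (np.sign(prox_v) / (1 + lr * l2)
//                 * np.maximum(np.abs(prox_v) - lr * l1, 0.0))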
12178
12179def TF_ResourceApplyProximalGradientDescentOp : TF_Op<"ResourceApplyProximalGradientDescent", []> {
12180  let summary = "Update '*var' as FOBOS algorithm with fixed learning rate.";
12181
12182  let description = [{
12183prox_v = var - alpha * delta
12184var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
12185  }];
12186
12187  let arguments = (ins
12188    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
12189    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$alpha,
12190    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
12191    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
12192    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The change.}]>:$delta,
12193
12194    DefaultValuedAttr<BoolAttr, "false">:$use_locking
12195  );
12196
12197  let results = (outs);
12198
12199  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
12200}
12201
12202def TF_ResourceApplyRMSPropOp : TF_Op<"ResourceApplyRMSProp", []> {
12203  let summary = "Update '*var' according to the RMSProp algorithm.";
12204
12205  let description = [{
12206Note that in the dense implementation of this algorithm, ms and mom will
12207update even if the grad is zero, but in the sparse implementation, ms
12208and mom will not update in iterations during which the grad is zero.
12209
12210mean_square = decay * mean_square + (1-decay) * gradient ** 2
12211Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
12212
12213ms <- rho * ms_{t-1} + (1-rho) * grad * grad
12214mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
12215var <- var - mom
12216  }];
12217
12218  let arguments = (ins
12219    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
12220    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$ms,
12221    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mom,
12222    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
12223    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay rate. Must be a scalar.}]>:$rho,
12224    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum scale. Must be a scalar.}]>:$momentum,
12225    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
12226    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
12227
12228    DefaultValuedAttr<BoolAttr, "false">:$use_locking
12229  );
12230
12231  let results = (outs);
12232
12233  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
12234}
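
// Illustrative only: the (uncentered) RMSProp update above as a NumPy sketch
// (hypothetical helper).
//
//   import numpy as np
//
//   def apply_rms_prop(var, ms, mom, lr, rho, momentum, epsilon, grad):
//       ms[:] = rho * ms + (1 - rho) * grad**2
//       mom[:] = momentum * mom + lr * grad / np.sqrt(ms + epsilon)
//       var[:] = var - mom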
12235
12236def TF_ResourceGatherOp : TF_Op<"ResourceGather", []> {
12237  let summary = [{
12238Gather slices from the variable pointed to by `resource` according to `indices`.
12239  }];
12240
12241  let description = [{
12242`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
12243Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
12244
12245```python
12246    # Scalar indices
12247    output[:, ..., :] = params[indices, :, ..., :]
12248
12249    # Vector indices
12250    output[i, :, ..., :] = params[indices[i], :, ..., :]
12251
12252    # Higher rank indices
12253    output[i, ..., j, :, ..., :] = params[indices[i, ..., j], :, ..., :]
12254```
12255  }];
12256
12257  let arguments = (ins
12258    Arg<TF_ResourceTensor, "", [TF_VariableRead]>:$resource,
12259    TF_I32OrI64Tensor:$indices,
12260
12261    DefaultValuedAttr<I64Attr, "0">:$batch_dims,
12262    DefaultValuedAttr<BoolAttr, "true">:$validate_indices
12263  );
12264
12265  let results = (outs
12266    TF_Tensor:$output
12267  );
12268
12269  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12270  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
12271}
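
// Illustrative only: with batch_dims == 0 the gather above behaves like
// NumPy's take along axis 0, so the output shape is
// indices.shape + params.shape[1:] (values below are made up).
//
//   import numpy as np
//
//   params = np.arange(12.0).reshape(4, 3)      # stands in for the variable
//   indices = np.array([[2, 0], [1, 3]])
//   output = np.take(params, indices, axis=0)   # shape (2, 2, 3)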
12272
12273def TF_ResourceScatterAddOp : TF_Op<"ResourceScatterAdd", []> {
12274  let summary = "Adds sparse updates to the variable referenced by `resource`.";
12275
12276  let description = [{
12277This operation computes
12278
12279    # Scalar indices
12280    ref[indices, ...] += updates[...]
12281
12282    # Vector indices (for each i)
12283    ref[indices[i], ...] += updates[i, ...]
12284
12285    # High rank indices (for each i, ..., j)
12286    ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
12287
12288Duplicate entries are handled correctly: if multiple `indices` reference
12289the same location, their contributions add.
12290
12291Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
12292
12293<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12294<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
12295</div>
12296  }];
12297
12298  let arguments = (ins
12299    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
12300    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
12301    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to add to `ref`.}]>:$updates
12302  );
12303
12304  let results = (outs);
12305
12306  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12307  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
12308}
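
// Illustrative only: the duplicate-index behavior above matches np.add.at,
// which accumulates unbuffered (plain fancy-index assignment would not).
//
//   import numpy as np
//
//   ref = np.zeros(4)
//   np.add.at(ref, [1, 1, 3], [1.0, 2.0, 5.0])
//   # ref is now [0., 3., 0., 5.] -- both updates to index 1 were added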
12309
12310def TF_ResourceScatterDivOp : TF_Op<"ResourceScatterDiv", []> {
12311  let summary = [{
12312Divides sparse updates into the variable referenced by `resource`.
12313  }];
12314
12315  let description = [{
12316This operation computes
12317
12318    # Scalar indices
12319    ref[indices, ...] /= updates[...]
12320
12321    # Vector indices (for each i)
12322    ref[indices[i], ...] /= updates[i, ...]
12323
12324    # High rank indices (for each i, ..., j)
12325    ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
12326
12327Duplicate entries are handled correctly: if multiple `indices` reference
12328the same location, their contributions divide.
12329
12330Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
12331
12332<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12333<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
12334</div>
12335  }];
12336
12337  let arguments = (ins
12338    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
12339    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
12340    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to divide `ref` by.}]>:$updates
12341  );
12342
12343  let results = (outs);
12344
12345  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12346  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
12347}
12348
12349def TF_ResourceScatterMaxOp : TF_Op<"ResourceScatterMax", []> {
12350  let summary = [{
12351Reduces sparse updates into the variable referenced by `resource` using the `max` operation.
12352  }];
12353
12354  let description = [{
12355This operation computes
12356
12357    # Scalar indices
12358    ref[indices, ...] = max(ref[indices, ...], updates[...])
12359
12360    # Vector indices (for each i)
12361    ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
12362
12363    # High rank indices (for each i, ..., j)
12364    ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
12365
12366Duplicate entries are handled correctly: if multiple `indices` reference
12367the same location, their contributions are combined.
12368
12369Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
12370
12371<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12372<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
12373</div>
12374  }];
12375
12376  let arguments = (ins
12377    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
12378    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
12379    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to reduce into `ref` using the `max` operation.}]>:$updates
12380  );
12381
12382  let results = (outs);
12383
12384  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12385  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
12386}
12387
12388def TF_ResourceScatterMinOp : TF_Op<"ResourceScatterMin", []> {
12389  let summary = [{
12390Reduces sparse updates into the variable referenced by `resource` using the `min` operation.
12391  }];
12392
12393  let description = [{
12394This operation computes
12395
12396    # Scalar indices
12397    ref[indices, ...] = min(ref[indices, ...], updates[...])
12398
12399    # Vector indices (for each i)
12400    ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
12401
12402    # High rank indices (for each i, ..., j)
12403    ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
12404
12405Duplicate entries are handled correctly: if multiple `indices` reference
12406the same location, their contributions are combined.
12407
12408Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
12409
12410<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12411<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
12412</div>
12413  }];
12414
12415  let arguments = (ins
12416    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
12417    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
12418    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to reduce into `ref` using the `min` operation.}]>:$updates
12419  );
12420
12421  let results = (outs);
12422
12423  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12424  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
12425}
12426
12427def TF_ResourceScatterMulOp : TF_Op<"ResourceScatterMul", []> {
12428  let summary = [{
12429Multiplies sparse updates into the variable referenced by `resource`.
12430  }];
12431
12432  let description = [{
12433This operation computes
12434
12435    # Scalar indices
12436    ref[indices, ...] *= updates[...]
12437
12438    # Vector indices (for each i)
12439    ref[indices[i], ...] *= updates[i, ...]
12440
12441    # High rank indices (for each i, ..., j)
12442    ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
12443
12444Duplicate entries are handled correctly: if multiple `indices` reference
12445the same location, their contributions multiply.
12446
12447Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
12448
12449<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12450<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
12451</div>
12452  }];
12453
12454  let arguments = (ins
12455    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
12456    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
12457    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to multiply into `ref`.}]>:$updates
12458  );
12459
12460  let results = (outs);
12461
12462  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12463  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
12464}
12465
12466def TF_ResourceScatterNdAddOp : TF_Op<"ResourceScatterNdAdd", []> {
12467  let summary = [{
12468Applies sparse addition to individual values or slices in a Variable.
12469  }];
12470
12471  let description = [{
12472`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
12473
12474`indices` must be an integer tensor, containing indices into `ref`.
12475It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
12476
12477The innermost dimension of `indices` (with length `K`) corresponds to
12478indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
12479dimension of `ref`.
12480
12481`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
12482
12483```
12484[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
12485```
12486
12487For example, say we want to add 4 scattered elements to a rank-1 tensor
12488with 8 elements. In Python, that addition would look like this:
12489
12490```python
12491ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
12492indices = tf.constant([[4], [3], [1], [7]])
12493updates = tf.constant([9, 10, 11, 12])
12494add = tf.scatter_nd_add(ref, indices, updates)
12495with tf.Session() as sess:
12496  print(sess.run(add))
12497```
12498
12499The resulting update to ref would look like this:
12500
12501    [1, 13, 3, 14, 14, 6, 7, 20]
12502
12503See `tf.scatter_nd` for more details about how to make updates to
12504slices.
12505  }];
12506
12507  let arguments = (ins
12508    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
12509    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
12510A tensor of indices into ref.}]>:$indices,
12511    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of
12512values to add to ref.}]>:$updates,
12513
12514    DefaultValuedAttr<BoolAttr, "true">:$use_locking
12515  );
12516
12517  let results = (outs);
12518
12519  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
12520  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12521}
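
// Illustrative only: a NumPy check of the example above (here K == P == 1,
// so the trailing index dimension addresses single elements).
//
//   import numpy as np
//
//   ref = np.array([1, 2, 3, 4, 5, 6, 7, 8])
//   indices = np.array([[4], [3], [1], [7]])
//   updates = np.array([9, 10, 11, 12])
//   np.add.at(ref, indices[:, 0], updates)
//   # ref is now [1, 13, 3, 14, 14, 6, 7, 20]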
12522
12523def TF_ResourceScatterNdSubOp : TF_Op<"ResourceScatterNdSub", []> {
12524  let summary = [{
12525Applies sparse subtraction to individual values or slices in a Variable.
12526  }];
12527
12528  let description = [{
12529`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
12530
12531`indices` must be an integer tensor, containing indices into `ref`.
12532It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
12533
12534The innermost dimension of `indices` (with length `K`) corresponds to
12535indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
12536dimension of `ref`.
12537
12538`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
12539
12540```
12541[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
12542```
12543
12544For example, say we want to subtract 4 scattered elements from a rank-1 tensor
12545with 8 elements. In Python, that subtraction would look like this:
12546
12547```python
12548ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
12549indices = tf.constant([[4], [3], [1], [7]])
12550updates = tf.constant([9, 10, 11, 12])
12551sub = tf.scatter_nd_sub(ref, indices, updates)
12552with tf.Session() as sess:
12553  print(sess.run(sub))
12554```
12555
12556The resulting update to ref would look like this:
12557
12558    [1, -9, 3, -6, -4, 6, 7, -4]
12559
12560See `tf.scatter_nd` for more details about how to make updates to
12561slices.
12562  }];
12563
12564  let arguments = (ins
12565    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
12566    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
12567A tensor of indices into ref.}]>:$indices,
12568    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of
12569values to subtract from ref.}]>:$updates,
12570
12571    DefaultValuedAttr<BoolAttr, "true">:$use_locking
12572  );
12573
12574  let results = (outs);
12575
12576  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
12577  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12578}
12579
12580def TF_ResourceScatterNdUpdateOp : TF_Op<"ResourceScatterNdUpdate", []> {
12581  let summary = [{
12582Applies sparse `updates` to individual values or slices within a given variable according to `indices`.
12583  }];
12584
12585  let description = [{
12588`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
12589
12590`indices` must be an integer tensor, containing indices into `ref`.
12591It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
12592
12593The innermost dimension of `indices` (with length `K`) corresponds to
12594indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
12595dimension of `ref`.
12596
12597`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
12598
12599```
12600[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
12601```
12602
12603For example, say we want to update 4 scattered elements in a rank-1 tensor
12604with 8 elements. In Python, that update would look like this:
12605
12606```python
12607    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
12608    indices = tf.constant([[4], [3], [1], [7]])
12609    updates = tf.constant([9, 10, 11, 12])
12610    update = tf.scatter_nd_update(ref, indices, updates)
12611    with tf.Session() as sess:
12612      print(sess.run(update))
12613```
12614
12615The resulting update to ref would look like this:
12616
12617    [1, 11, 3, 10, 9, 6, 7, 12]
12618
12619See `tf.scatter_nd` for more details about how to make updates to
12620slices.
12621  }];
12622
12623  let arguments = (ins
12624    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
12625    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
12626A tensor of indices into ref.}]>:$indices,
12627    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of updated
12628values to assign to ref.}]>:$updates,
12629
12630    DefaultValuedAttr<BoolAttr, "true">:$use_locking
12631  );
12632
12633  let results = (outs);
12634
12635  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
12636  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12637}
12638
12639def TF_ResourceScatterSubOp : TF_Op<"ResourceScatterSub", []> {
12640  let summary = [{
12641Subtracts sparse updates from the variable referenced by `resource`.
12642  }];
12643
12644  let description = [{
12645This operation computes
12646
12647    # Scalar indices
12648    ref[indices, ...] -= updates[...]
12649
12650    # Vector indices (for each i)
12651    ref[indices[i], ...] -= updates[i, ...]
12652
12653    # High rank indices (for each i, ..., j)
12654    ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
12655
12656Duplicate entries are handled correctly: if multiple `indices` reference
12657the same location, their contributions add.
12658
12659Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
12660
12661<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12662<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
12663</div>
12664  }];
12665
12666  let arguments = (ins
12667    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
12668    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
12669    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to subtract from `ref`.}]>:$updates
12670  );
12671
12672  let results = (outs);
12673
12674  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12675  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
12676}
12677
12678def TF_ResourceScatterUpdateOp : TF_Op<"ResourceScatterUpdate", []> {
12679  let summary = [{
12680Assigns sparse updates to the variable referenced by `resource`.
12681  }];
12682
12683  let description = [{
12684This operation computes
12685
12686    # Scalar indices
12687    ref[indices, ...] = updates[...]
12688
12689    # Vector indices (for each i)
12690    ref[indices[i], ...] = updates[i, ...]
12691
12692    # High rank indices (for each i, ..., j)
12693    ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
12694  }];
12695
12696  let arguments = (ins
12697    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
12698    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
12699    Arg<TF_Tensor, [{A tensor of updated values to assign to `ref`.}]>:$updates
12700  );
12701
12702  let results = (outs);
12703
12704  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
12705  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
12706}
12707
12708def TF_ResourceStridedSliceAssignOp : TF_Op<"ResourceStridedSliceAssign", []> {
12709  let summary = "Assign `value` to the sliced l-value reference of `ref`.";
12710
12711  let description = [{
12712The values of `value` are assigned to the positions in the variable
12713`ref` that are selected by the slice parameters. The slice parameters
12714`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
12715
12716NOTE: this op currently does not support broadcasting, so `value`'s
12717shape must be exactly the shape produced by the slice of `ref`.
12718  }];
12719
12720  let arguments = (ins
12721    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$ref,
12722    TF_I32OrI64Tensor:$begin,
12723    TF_I32OrI64Tensor:$end,
12724    TF_I32OrI64Tensor:$strides,
12725    TF_Tensor:$value,
12726
12727    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
12728    DefaultValuedAttr<I64Attr, "0">:$end_mask,
12729    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
12730    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
12731    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
12732  );
12733
12734  let results = (outs);
12735
12736  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
12737  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<4>;
12738}
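
// Illustrative only: ignoring the *_mask attributes, the op above is plain
// strided-slice assignment; a NumPy sketch (hypothetical helper):
//
//   import numpy as np
//
//   def strided_slice_assign(ref, begin, end, strides, value):
//       sl = tuple(slice(b, e, s) for b, e, s in zip(begin, end, strides))
//       ref[sl] = value   # shapes must match exactly; no broadcasting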
12739
12740def TF_RestoreOp : TF_Op<"Restore", []> {
12741  let summary = "Restores a tensor from checkpoint files.";
12742
12743  let description = [{
12744Reads a tensor stored in one or several files. If there are several files (for
12745instance because a tensor was saved as slices), `file_pattern` may contain
12746wildcard symbols (`*` and `?`) in the filename portion only, not in the
12747directory portion.
12748
12749If a `file_pattern` matches several files, `preferred_shard` can be used to hint
12750in which file the requested tensor is likely to be found. This op will first
12751open the file at index `preferred_shard` in the list of matching files and try
12752to restore tensors from that file.  Only if some tensors or tensor slices are
12753not found in that first file does the Op open all the files. Setting
12754`preferred_shard` to match the value passed as the `shard` input
12755of a matching `Save` Op may speed up Restore.  This attribute only affects
12756performance, not correctness.  The default value -1 means files are processed in
12757order.
12758
12759See also `RestoreSlice`.
12760  }];
12761
12762  let arguments = (ins
12763    Arg<TF_StrTensor, [{Must have a single element. The pattern of the files from
12764which we read the tensor.}]>:$file_pattern,
12765    Arg<TF_StrTensor, [{Must have a single element. The name of the tensor to be
12766restored.}]>:$tensor_name,
12767
12768    DefaultValuedAttr<I64Attr, "-1">:$preferred_shard
12769  );
12770
12771  let results = (outs
12772    Res<TF_Tensor, [{The restored tensor.}]>:$tensor
12773  );
12774
12775  TF_DerivedResultTypeAttr dt = TF_DerivedResultTypeAttr<0>;
12776}
12777
12778def TF_RestoreV2Op : TF_Op<"RestoreV2", []> {
12779  let summary = "Restores tensors from a V2 checkpoint.";
12780
12781  let description = [{
12782For backward compatibility with the V1 format, this Op currently allows
12783restoring from a V1 checkpoint as well:
12784  - This Op first attempts to find the V2 index file pointed to by "prefix", and
12785    if found, proceeds to read it as a V2 checkpoint;
12786  - Otherwise the V1 read path is invoked.
12787Relying on this behavior is not recommended, as the ability to fall back to read
12788V1 might be deprecated and eventually removed.
12789
12790By default, restores the named tensors in full.  If the caller wishes to restore
12791specific slices of stored tensors, "shape_and_slices" should be non-empty
12792strings and correspondingly well-formed.
12793
12794Callers must ensure all the named tensors are indeed stored in the checkpoint.
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{Must have a single element.  The prefix of a V2 checkpoint.}]>:$prefix,
    Arg<TF_StrTensor, [{shape {N}.  The names of the tensors to be restored.}]>:$tensor_names,
    Arg<TF_StrTensor, [{shape {N}.  The slice specs of the tensors to be restored.
Empty strings indicate that they are non-partitioned tensors.}]>:$shape_and_slices
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{shape {N}.  The restored tensors, whose shapes are read from the
checkpoint directly.}]>:$tensors
  );

  TF_DerivedResultTypeListAttr dtypes = TF_DerivedResultTypeListAttr<0>;
}

def TF_RetrieveTPUEmbeddingADAMParametersOp : TF_Op<"RetrieveTPUEmbeddingADAMParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Retrieve ADAM embedding parameters.";

  let description = [{
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
  }];

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Parameter parameters updated by the ADAM optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter momenta updated by the ADAM optimization algorithm.}]>:$momenta,
    Res<TF_Float32Tensor, [{Parameter velocities updated by the ADAM optimization algorithm.}]>:$velocities
  );
}

def TF_RetrieveTPUEmbeddingADAMParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingADAMParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "";

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    TF_Float32Tensor:$parameters,
    TF_Float32Tensor:$momenta,
    TF_Float32Tensor:$velocities,
    TF_Float32Tensor:$gradient_accumulators
  );
}

def TF_RetrieveTPUEmbeddingAdadeltaParametersOp : TF_Op<"RetrieveTPUEmbeddingAdadeltaParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Retrieve Adadelta embedding parameters.";

  let description = [{
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
  }];

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adadelta optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adadelta optimization algorithm.}]>:$accumulators,
    Res<TF_Float32Tensor, [{Parameter updates updated by the Adadelta optimization algorithm.}]>:$updates
  );
}

def TF_RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "";

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    TF_Float32Tensor:$parameters,
    TF_Float32Tensor:$accumulators,
    TF_Float32Tensor:$updates,
    TF_Float32Tensor:$gradient_accumulators
  );
}

def TF_RetrieveTPUEmbeddingAdagradParametersOp : TF_Op<"RetrieveTPUEmbeddingAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Retrieve Adagrad embedding parameters.";

  let description = [{
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
  }];

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adagrad optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adagrad optimization algorithm.}]>:$accumulators
  );
}

def TF_RetrieveTPUEmbeddingAdagradParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "";

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    TF_Float32Tensor:$parameters,
    TF_Float32Tensor:$accumulators,
    TF_Float32Tensor:$gradient_accumulators
  );
}

def TF_RetrieveTPUEmbeddingCenteredRMSPropParametersOp : TF_Op<"RetrieveTPUEmbeddingCenteredRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Retrieve centered RMSProp embedding parameters.";

  let description = [{
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
  }];

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Parameter parameters updated by the centered RMSProp optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter ms updated by the centered RMSProp optimization algorithm.}]>:$ms,
    Res<TF_Float32Tensor, [{Parameter mom updated by the centered RMSProp optimization algorithm.}]>:$mom,
    Res<TF_Float32Tensor, [{Parameter mg updated by the centered RMSProp optimization algorithm.}]>:$mg
  );
}

def TF_RetrieveTPUEmbeddingFTRLParametersOp : TF_Op<"RetrieveTPUEmbeddingFTRLParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Retrieve FTRL embedding parameters.";

  let description = [{
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
  }];

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Parameter parameters updated by the FTRL optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter accumulators updated by the FTRL optimization algorithm.}]>:$accumulators,
    Res<TF_Float32Tensor, [{Parameter linears updated by the FTRL optimization algorithm.}]>:$linears
  );
}

def TF_RetrieveTPUEmbeddingFTRLParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingFTRLParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "";

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    TF_Float32Tensor:$parameters,
    TF_Float32Tensor:$accumulators,
    TF_Float32Tensor:$linears,
    TF_Float32Tensor:$gradient_accumulators
  );
}

def TF_RetrieveTPUEmbeddingMDLAdagradLightParametersOp : TF_Op<"RetrieveTPUEmbeddingMDLAdagradLightParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Retrieve MDL Adagrad Light embedding parameters.";

  let description = [{
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
  }];

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Parameter parameters updated by the MDL Adagrad Light optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.}]>:$accumulators,
    Res<TF_Float32Tensor, [{Parameter weights updated by the MDL Adagrad Light optimization algorithm.}]>:$weights,
    Res<TF_Float32Tensor, [{Parameter benefits updated by the MDL Adagrad Light optimization algorithm.}]>:$benefits
  );
}

def TF_RetrieveTPUEmbeddingMomentumParametersOp : TF_Op<"RetrieveTPUEmbeddingMomentumParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Retrieve Momentum embedding parameters.";

  let description = [{
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
  }];

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Parameter parameters updated by the Momentum optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter momenta updated by the Momentum optimization algorithm.}]>:$momenta
  );
}

def TF_RetrieveTPUEmbeddingMomentumParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingMomentumParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "";

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    TF_Float32Tensor:$parameters,
    TF_Float32Tensor:$momenta,
    TF_Float32Tensor:$gradient_accumulators
  );
}

def TF_RetrieveTPUEmbeddingProximalAdagradParametersOp : TF_Op<"RetrieveTPUEmbeddingProximalAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Retrieve proximal Adagrad embedding parameters.";

  let description = [{
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
  }];

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Parameter parameters updated by the proximal Adagrad optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter accumulators updated by the proximal Adagrad optimization algorithm.}]>:$accumulators
  );
}

def TF_RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "";

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    TF_Float32Tensor:$parameters,
    TF_Float32Tensor:$accumulators,
    TF_Float32Tensor:$gradient_accumulators
  );
}

def TF_RetrieveTPUEmbeddingProximalYogiParametersOp : TF_Op<"RetrieveTPUEmbeddingProximalYogiParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "";

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    TF_Float32Tensor:$parameters,
    TF_Float32Tensor:$v,
    TF_Float32Tensor:$m
  );
}

def TF_RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "";

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    TF_Float32Tensor:$parameters,
    TF_Float32Tensor:$v,
    TF_Float32Tensor:$m,
    TF_Float32Tensor:$gradient_accumulators
  );
}

def TF_RetrieveTPUEmbeddingRMSPropParametersOp : TF_Op<"RetrieveTPUEmbeddingRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Retrieve RMSProp embedding parameters.";

  let description = [{
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
  }];

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Parameter parameters updated by the RMSProp optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter ms updated by the RMSProp optimization algorithm.}]>:$ms,
    Res<TF_Float32Tensor, [{Parameter mom updated by the RMSProp optimization algorithm.}]>:$mom
  );
}

def TF_RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "";

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    TF_Float32Tensor:$parameters,
    TF_Float32Tensor:$ms,
    TF_Float32Tensor:$mom,
    TF_Float32Tensor:$gradient_accumulators
  );
}

def TF_RetrieveTPUEmbeddingStochasticGradientDescentParametersOp : TF_Op<"RetrieveTPUEmbeddingStochasticGradientDescentParameters", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Retrieve SGD embedding parameters.";

  let description = [{
An op that retrieves optimization parameters from embedding to host
memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
the correct embedding table configuration. For example, this op is
used to retrieve updated parameters before saving a checkpoint.
  }];

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Parameter parameters updated by the stochastic gradient descent optimization algorithm.}]>:$parameters
  );
}

def TF_RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "";

  let arguments = (ins
    DefaultValuedAttr<I64Attr, "-1">:$table_id,
    DefaultValuedAttr<StrAttr, "">:$table_name,
    I64Attr:$num_shards,
    I64Attr:$shard_id,
    DefaultValuedAttr<StrAttr, "">:$config
  );

  let results = (outs
    TF_Float32Tensor:$parameters,
    TF_Float32Tensor:$gradient_accumulators
  );
}

def TF_ReverseOp : TF_Op<"Reverse", [NoSideEffect]> {
  let summary = "Reverses specific dimensions of a tensor.";

  let description = [{
Given a `tensor`, and a `bool` tensor `dims` representing the dimensions
of `tensor`, this operation reverses each dimension `i` of `tensor` where
`dims[i]` is `True`.

`tensor` can have up to 8 dimensions. The number of dimensions
of `tensor` must equal the number of elements in `dims`. In other words:

`rank(tensor) = size(dims)`

For example:

```
# tensor 't' is [[[[ 0,  1,  2,  3],
#                  [ 4,  5,  6,  7],
#                  [ 8,  9, 10, 11]],
#                 [[12, 13, 14, 15],
#                  [16, 17, 18, 19],
#                  [20, 21, 22, 23]]]]
# tensor 't' shape is [1, 2, 3, 4]

# 'dims' is [False, False, False, True]
reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                        [ 7,  6,  5,  4],
                        [11, 10,  9,  8]],
                       [[15, 14, 13, 12],
                        [19, 18, 17, 16],
                        [23, 22, 21, 20]]]]

# 'dims' is [False, True, False, False]
reverse(t, dims) ==> [[[[12, 13, 14, 15],
                        [16, 17, 18, 19],
                        [20, 21, 22, 23]],
                       [[ 0,  1,  2,  3],
                        [ 4,  5,  6,  7],
                        [ 8,  9, 10, 11]]]]

# 'dims' is [False, False, True, False]
reverse(t, dims) ==> [[[[8, 9, 10, 11],
                        [4, 5, 6, 7],
                        [0, 1, 2, 3]],
                       [[20, 21, 22, 23],
                        [16, 17, 18, 19],
                        [12, 13, 14, 15]]]]
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Up to 8-D.}]>:$tensor,
    Arg<TF_BoolTensor, [{1-D. The dimensions to reverse.}]>:$dims
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The same shape as `tensor`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ReverseSequenceOp : TF_Op<"ReverseSequence", [NoSideEffect]> {
  let summary = "Reverses variable length slices.";

  let description = [{
This op first slices `input` along the dimension `batch_dim`, and for each
slice `i`, reverses the first `seq_lengths[i]` elements along
the dimension `seq_dim`.

The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.

The output slice `i` along dimension `batch_dim` is then given by input
slice `i`, with the first `seq_lengths[i]` slices along dimension
`seq_dim` reversed.

For example:

```
# Given this:
batch_dim = 0
seq_dim = 1
input.dims = (4, 8, ...)
seq_lengths = [7, 2, 3, 5]

# then slices of input are reversed on seq_dim, but only up to seq_lengths:
output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]

# while entries past seq_lengths are copied through:
output[0, 7:, :, ...] = input[0, 7:, :, ...]
output[1, 2:, :, ...] = input[1, 2:, :, ...]
output[2, 3:, :, ...] = input[2, 3:, :, ...]
output[3, 5:, :, ...] = input[3, 5:, :, ...]
```

In contrast, if:

```
# Given this:
batch_dim = 2
seq_dim = 0
input.dims = (8, ?, 4, ...)
seq_lengths = [7, 2, 3, 5]

# then slices of input are reversed on seq_dim, but only up to seq_lengths:
output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]

# while entries past seq_lengths are copied through:
output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
```
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{The input to reverse.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{1-D with length `input.dims(batch_dim)` and
`max(seq_lengths) <= input.dims(seq_dim)`}]>:$seq_lengths,

    I64Attr:$seq_dim,
    DefaultValuedAttr<I64Attr, "0">:$batch_dim
  );

  let results = (outs
    Res<TF_Tensor, [{The partially reversed input. It has the same shape as `input`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tlen = TF_DerivedOperandTypeAttr<1>;
}

def TF_ReverseV2Op : TF_Op<"ReverseV2", [NoSideEffect]> {
  let summary = "Reverses specific dimensions of a tensor.";

  let description = [{
Given a `tensor` and an `int32` tensor `axis` representing the set of
dimensions of `tensor` to reverse, this operation reverses each dimension
`i` for which there exists `j` such that `axis[j] == i`.

`tensor` can have up to 8 dimensions. `axis` may contain zero or more
entries. If an index is specified more than once, an InvalidArgument error is
raised.

For example:

```
# tensor 't' is [[[[ 0,  1,  2,  3],
#                  [ 4,  5,  6,  7],
#                  [ 8,  9, 10, 11]],
#                 [[12, 13, 14, 15],
#                  [16, 17, 18, 19],
#                  [20, 21, 22, 23]]]]
# tensor 't' shape is [1, 2, 3, 4]

# 'dims' is [3] or 'dims' is [-1]
reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
                        [ 7,  6,  5,  4],
                        [11, 10,  9,  8]],
                       [[15, 14, 13, 12],
                        [19, 18, 17, 16],
                        [23, 22, 21, 20]]]]

# 'dims' is '[1]' (or 'dims' is '[-3]')
reverse(t, dims) ==> [[[[12, 13, 14, 15],
                        [16, 17, 18, 19],
                        [20, 21, 22, 23]],
                       [[ 0,  1,  2,  3],
                        [ 4,  5,  6,  7],
                        [ 8,  9, 10, 11]]]]

# 'dims' is '[2]' (or 'dims' is '[-2]')
reverse(t, dims) ==> [[[[8, 9, 10, 11],
                        [4, 5, 6, 7],
                        [0, 1, 2, 3]],
                       [[20, 21, 22, 23],
                        [16, 17, 18, 19],
                        [12, 13, 14, 15]]]]
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Up to 8-D.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{1-D. The indices of the dimensions to reverse. Must be in the range
`[-rank(tensor), rank(tensor))`.}]>:$axis
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The same shape as `tensor`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
}

def TF_RightShiftOp : TF_Op<"RightShift", [NoSideEffect, ResultsBroadcastableShape]>,
                      WithBroadcastableBinOpBuilder {
  let summary = "Elementwise computes the bitwise right-shift of `x` and `y`.";

  let description = [{
Performs a logical shift for unsigned integer types, and an arithmetic shift
for signed integer types.

If `y` is negative, or greater than or equal to the width of `x` in bits, the
result is implementation defined.

Example:

```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
import numpy as np
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]

for dtype in dtype_list:
  lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)

  right_shift_result = bitwise_ops.right_shift(lhs, rhs)

  print(right_shift_result)

# This will print:
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)
# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)

lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
bitwise_ops.right_shift(lhs, rhs)
# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
```
  }];

  let arguments = (ins
    TF_IntTensor:$x,
    TF_IntTensor:$y
  );

  let results = (outs
    TF_IntTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_RintOp : TF_Op<"Rint", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Returns element-wise integer closest to x.";

  let description = [{
If the result is midway between two representable values,
the even representable value is chosen.
For example:

```
rint(-1.5) ==> -2.0
rint(0.5000001) ==> 1.0
rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
```
  }];

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_RiscAddOp : TF_Op<"RiscAdd", [Commutative, NoSideEffect]> {
  let summary = "Returns x + y element-wise.";

  let description = [{
*NOTE*: `RiscAdd` does not support broadcasting.

Given two input tensors, the `tf.risc_add` operation computes the sum for every element in the tensor.

Both input and output have a range `(-inf, inf)`.
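
Assuming the experimental RISC kernels are registered in your build (they may
not be in stock releases), the op is reachable through the generated raw-op
wrapper:

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0])
y = tf.constant([3.0, 4.0])
z = tf.raw_ops.RiscAdd(x=x, y=y)  # elementwise sum; no broadcasting
```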
  }];

  let arguments = (ins
    TF_FloatTensor:$x,
    TF_FloatTensor:$y
  );

  let results = (outs
    TF_FloatTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_RiscDotOp : TF_Op<"RiscDot", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    TF_FloatTensor:$a,
    TF_FloatTensor:$b,

    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
    DefaultValuedAttr<BoolAttr, "false">:$transpose_b
  );

  let results = (outs
    TF_FloatTensor:$product
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_RngReadAndSkipOp : TF_Op<"RngReadAndSkip", []> {
  let summary = "Advance the counter of a counter-based RNG.";

  let description = [{
The state of the RNG after
`rng_read_and_skip(n)` will be the same as that after `uniform([n])`
(or any other distribution). The actual increment added to the
counter is an unspecified implementation choice.
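
In practice this op is usually reached through `tf.random.Generator`, whose
`skip()` method advances the counter; a minimal sketch (assuming a TF release
where `Generator` is backed by this op):

```python
import tensorflow as tf

g = tf.random.Generator.from_seed(1234)
old_state = g.state.numpy()  # RNG state held in a resource variable
g.skip(10)                   # advances the counter as if 10 draws had happened
new_state = g.state.numpy()
```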
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle of the resource variable that stores the state of the RNG.}]>:$resource,
    Arg<TF_Int32Tensor, [{The RNG algorithm.}]>:$alg,
    Arg<TF_Uint64Tensor, [{The amount of advancement.}]>:$delta
  );

  let results = (outs
    Res<TF_Int64Tensor, [{The old value of the resource variable, before incrementing. Since state size is algorithm-dependent, this output will be right-padded with zeros to reach shape int64[3] (the current maximal state size among algorithms).}]>:$value
  );
}

def TF_RollOp : TF_Op<"Roll", [NoSideEffect]> {
  let summary = "Rolls the elements of a tensor along an axis.";

  let description = [{
The elements are shifted positively (towards larger indices) by the offset of
`shift` along the dimension of `axis`. Negative `shift` values will shift
elements in the opposite direction. Elements that roll past the last position
will wrap around to the first and vice versa. Multiple shifts along multiple
axes may be specified.

For example:

```
# 't' is [0, 1, 2, 3, 4]
roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]

# shifting along multiple dimensions
# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]

# shifting along the same axis multiple times
# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
```
  }];

  let arguments = (ins
    TF_Tensor:$input,
    Arg<TF_I32OrI64Tensor, [{Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
elements are shifted positively (towards larger indices) along the dimension
specified by `axis[i]`. Negative shifts will roll the elements in the opposite
direction.}]>:$shift,
    Arg<TF_I32OrI64Tensor, [{Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which the
shift `shift[i]` should occur. If the same axis is referenced more than once, the
total shift for that axis will be the sum of all the shifts that belong to that
axis.}]>:$axis
  );

  let results = (outs
    Res<TF_Tensor, [{Has the same shape and size as the input. The elements are shifted
positively (towards larger indices) by the offsets of `shift` along the
dimensions of `axis`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Taxis = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr Tshift = TF_DerivedOperandTypeAttr<1>;
}

def TF_RoundOp : TF_Op<"Round", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
  let summary = [{
Rounds the values of a tensor to the nearest integer, element-wise.
  }];

  let description = [{
Rounds half to even, also known as banker's rounding. If you want to round
according to the current system rounding mode, use `std::rint`.
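
A worked example of the half-to-even behavior:

```python
import tensorflow as tf

tf.round(tf.constant([0.5, 1.5, 2.5, -0.5]))
# ==> [0., 2., 2., -0.]  (ties are rounded to the nearest even integer)
```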
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_RsqrtOp : TF_Op<"Rsqrt", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes reciprocal of square root of x element-wise.";

  let description = [{
I.e., \\(y = 1 / \sqrt{x}\\).
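
A quick numeric illustration:

```python
import tensorflow as tf

tf.math.rsqrt(tf.constant([1.0, 4.0, 16.0]))  # ==> [1.0, 0.5, 0.25]
```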
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_RsqrtGradOp : TF_Op<"RsqrtGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the gradient for the rsqrt of `x` wrt its input.";

  let description = [{
Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
is the corresponding input gradient.
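
For reference, the formula follows from the chain rule: since
\\(y = x^{-1/2}\\), \\(\frac{dy}{dx} = -\frac{1}{2} x^{-3/2} = -\frac{1}{2} y^3\\),
so the gradient with respect to `x` is `dy * -0.5 * y^3`.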
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$y,
    TF_FpOrComplexTensor:$dy
  );

  let results = (outs
    TF_FpOrComplexTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SaveOp : TF_Op<"Save", []> {
  let summary = "Saves the input tensors to disk.";

  let description = [{
The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
is written to `filename` with name `tensor_names[i]`.

See also `SaveSlices`.
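
A minimal sketch via the raw-op wrapper (the file path is a placeholder):

```python
import tensorflow as tf

tf.raw_ops.Save(
    filename="/tmp/ckpt",
    tensor_names=["w", "b"],
    data=[tf.ones([2, 2]), tf.zeros([2])])
```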
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{Must have a single element. The name of the file to which we write
the tensor.}]>:$filename,
    Arg<TF_StrTensor, [{Shape `[N]`. The names of the tensors to be saved.}]>:$tensor_names,
    Arg<Variadic<TF_Tensor>, [{`N` tensors to save.}]>:$data
  );

  let results = (outs);

  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<2>;
}

def TF_SaveSlicesOp : TF_Op<"SaveSlices", []> {
  let summary = "Saves input tensor slices to disk.";

  let description = [{
This is like `Save` except that tensors can be listed in the saved file as being
a slice of a larger tensor.  `shapes_and_slices` specifies the shape of the
larger tensor and the slice that this tensor covers. `shapes_and_slices` must
have as many elements as `tensor_names`.

Elements of the `shapes_and_slices` input must either be:

*  The empty string, in which case the corresponding tensor is
   saved normally.
*  A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
   `dimI` are the dimensions of the larger tensor and `slice-spec`
   specifies what part is covered by the tensor to save.

`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
where each `sliceI` is either:

*  The string `-` meaning that the slice covers all indices of this dimension
*  `start,length` where `start` and `length` are integers.  In that
   case the slice covers `length` indices starting at `start`.

See also `Save`.
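
For illustration, a sketch that saves a `[4, 4]` tensor as rows `0..3` of a
larger `[10, 4]` tensor (the path and names are placeholders; the slice spec
follows the grammar above):

```python
import tensorflow as tf

part = tf.ones([4, 4])
tf.raw_ops.SaveSlices(
    filename="/tmp/ckpt",
    tensor_names=["big"],
    shapes_and_slices=["10 4 0,4:-"],  # dims "10 4"; slices "0,4" (rows) and "-" (all columns)
    data=[part])
```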
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{Must have a single element. The name of the file to which we write the
tensor.}]>:$filename,
    Arg<TF_StrTensor, [{Shape `[N]`. The names of the tensors to be saved.}]>:$tensor_names,
    Arg<TF_StrTensor, [{Shape `[N]`.  The shapes and slice specifications to use when
saving the tensors.}]>:$shapes_and_slices,
    Arg<Variadic<TF_Tensor>, [{`N` tensors to save.}]>:$data
  );

  let results = (outs);

  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<3>;
}

def TF_SaveV2Op : TF_Op<"SaveV2", []> {
  let summary = "Saves tensors in V2 checkpoint format.";

  let description = [{
By default, saves the named tensors in full.  If the caller wishes to save
specific slices of full tensors, "shape_and_slices" should be non-empty strings
and correspondingly well-formed.
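
For illustration, a full-tensor save through the raw-op wrapper (the prefix is
a placeholder):

```python
import tensorflow as tf

tf.raw_ops.SaveV2(
    prefix="/tmp/model.ckpt",
    tensor_names=["w", "b"],
    shape_and_slices=["", ""],  # empty strings mean "save in full"
    tensors=[tf.ones([2, 2]), tf.zeros([2])])
```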
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{Must have a single element. The prefix of the V2 checkpoint to which we
write the tensors.}]>:$prefix,
    Arg<TF_StrTensor, [{shape {N}. The names of the tensors to be saved.}]>:$tensor_names,
    Arg<TF_StrTensor, [{shape {N}.  The slice specs of the tensors to be saved.
Empty strings indicate that they are non-partitioned tensors.}]>:$shape_and_slices,
    Arg<Variadic<TF_Tensor>, [{`N` tensors to save.}]>:$tensors
  );

  let results = (outs);

  TF_DerivedOperandTypeListAttr dtypes = TF_DerivedOperandTypeListAttr<3>;
}

def TF_ScatterNdOp : TF_Op<"ScatterNd", [NoSideEffect]> {
  let summary = [{
Scatters `updates` into a tensor of shape `shape` according to `indices`.
  }];

  let description = [{
Scatters sparse `updates` at the specified `indices` into a zero-initialized
tensor. This op returns an `output` tensor with the `shape` you specify. This
op is the inverse of the `tf.gather_nd` operator which extracts values or
slices from a given tensor.

This operation is similar to `tf.tensor_scatter_nd_add`, except that the tensor
is zero-initialized. Calling `tf.scatter_nd(indices, values, shape)`
is identical to calling
`tf.tensor_scatter_nd_add(tf.zeros(shape, values.dtype), indices, values)`.

If `indices` contains duplicates, the duplicate `values` are accumulated
(summed).

**WARNING**: The order in which updates are applied is nondeterministic, so the
output will be nondeterministic if `indices` contains duplicates;
numbers summed in different order may yield different results because of some
numerical approximation issues.

`indices` is an integer tensor containing indices into a tensor of shape
`shape`. The last dimension of `indices` can be at most the rank of `shape`:

    indices.shape[-1] <= shape.rank

The last dimension of `indices` corresponds to indices of elements
(if `indices.shape[-1] = shape.rank`) or slices
(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
`shape`.

`updates` is a tensor with shape:

    indices.shape[:-1] + shape[indices.shape[-1]:]

The simplest form of the scatter op is to insert individual elements in
a tensor by index. Consider an example where you want to insert 4 scattered
elements in a rank-1 tensor with 8 elements.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
</div>

In Python, this scatter operation would look like this:

```python
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    shape = tf.constant([8])
    scatter = tf.scatter_nd(indices, updates, shape)
    print(scatter)
```

The resulting tensor would look like this:

    [0, 11, 0, 10, 9, 0, 0, 12]

You can also insert entire slices of a higher rank tensor all at once. For
example, you can insert two slices in the first dimension of a rank-3 tensor
with two matrices of new values.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
</div>

In Python, this scatter operation would look like this:

```python
    indices = tf.constant([[0], [2]])
    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                            [7, 7, 7, 7], [8, 8, 8, 8]],
                           [[5, 5, 5, 5], [6, 6, 6, 6],
                            [7, 7, 7, 7], [8, 8, 8, 8]]])
    shape = tf.constant([4, 4, 4])
    scatter = tf.scatter_nd(indices, updates, shape)
    print(scatter)
```

The resulting tensor would look like this:

    [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
     [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]

Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, the index is ignored.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{Tensor of indices.}]>:$indices,
    Arg<TF_Tensor, [{Values to scatter into the output tensor.}]>:$updates,
    Arg<TF_I32OrI64Tensor, [{1-D. The shape of the output tensor.}]>:$shape
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor with the given shape and updates applied according
to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<0>;
}

def TF_SegmentMaxOp : TF_Op<"SegmentMax", [NoSideEffect]> {
  let summary = "Computes the maximum along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

Computes a tensor such that
\\(output_i = \max_j(data_j)\\) where `max` is over `j` such
that `segment_ids[j] == i`.

If the max is empty for a given segment ID `i`, `output[i] = 0`.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
</div>

For example:

```
c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
tf.segment_max(c, tf.constant([0, 0, 1]))
# ==> [[4, 3, 3, 4],
#      [5, 6, 7, 8]]
```
  }];

  let arguments = (ins
    TF_IntOrFpTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_SegmentMeanOp : TF_Op<"SegmentMean", [NoSideEffect]> {
  let summary = "Computes the mean along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

Computes a tensor such that
\\(output_i = \frac{\sum_j data_j}{N}\\) where the mean is
over `j` such that `segment_ids[j] == i` and `N` is the total number of
values summed.

If the mean is empty for a given segment ID `i`, `output[i] = 0`.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
</div>

For example:

```
c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
tf.segment_mean(c, tf.constant([0, 0, 1]))
# ==> [[2.5, 2.5, 2.5, 2.5],
#      [5, 6, 7, 8]]
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_SegmentMinOp : TF_Op<"SegmentMin", [NoSideEffect]> {
  let summary = "Computes the minimum along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

Computes a tensor such that
\\(output_i = \min_j(data_j)\\) where `min` is over `j` such
that `segment_ids[j] == i`.

If the min is empty for a given segment ID `i`, `output[i] = 0`.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
</div>

For example:

```
c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
tf.segment_min(c, tf.constant([0, 0, 1]))
# ==> [[1, 2, 2, 1],
#      [5, 6, 7, 8]]
```
  }];

  let arguments = (ins
    TF_IntOrFpTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_SegmentProdOp : TF_Op<"SegmentProd", [NoSideEffect]> {
  let summary = "Computes the product along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

Computes a tensor such that
\\(output_i = \prod_j data_j\\) where the product is over `j` such
that `segment_ids[j] == i`.

If the product is empty for a given segment ID `i`, `output[i] = 1`.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
</div>

For example:

```
c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
tf.segment_prod(c, tf.constant([0, 0, 1]))
# ==> [[4, 6, 6, 4],
#      [5, 6, 7, 8]]
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_SegmentSumOp : TF_Op<"SegmentSum", [NoSideEffect]> {
  let summary = "Computes the sum along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

Computes a tensor such that
\\(output_i = \sum_j data_j\\) where the sum is over `j` such
that `segment_ids[j] == i`.

If the sum is empty for a given segment ID `i`, `output[i] = 0`.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
</div>

For example:

```
c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
# ==> [[5, 5, 5, 5],
#      [5, 6, 7, 8]]
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_SelectOp : TF_Op<"Select", [NoSideEffect]> {
  let summary = "Selects elements from `x` or `y`, depending on `condition`.";

  let description = [{
The `x` and `y` tensors must have the same shape, and the
output will also have that shape.

The `condition` tensor must be a scalar if `x` and `y` are scalars.
If `x` and `y` are vectors or higher rank, then `condition` must be either a
scalar, a vector with size matching the first dimension of `x`, or have
the same shape as `x`.

The `condition` tensor acts as a mask that chooses, based on the value at each
element, whether the corresponding element / row in the output should be
taken from `x` (if true) or `y` (if false).

If `condition` is a vector and `x` and `y` are higher rank matrices, then
it chooses which row (outer dimension) to copy from `x` and `y`.
If `condition` has the same shape as `x` and `y`, then it chooses which
element to copy from `x` and `y`.

For example:

```python
# 'condition' tensor is [[True,  False]
#                        [False, True]]
# 't' is [[1, 2],
#         [3, 4]]
# 'e' is [[5, 6],
#         [7, 8]]
select(condition, t, e)  # => [[1, 6], [7, 4]]


# 'condition' tensor is [True, False]
# 't' is [[1, 2],
#         [3, 4]]
# 'e' is [[5, 6],
#         [7, 8]]
select(condition, t, e) ==> [[1, 2],
                             [7, 8]]

```
  }];

  let arguments = (ins
    TF_BoolTensor:$condition,
    Arg<TF_Tensor, [{= A `Tensor` which may have the same shape as `condition`.
If `condition` is rank 1, `x` may have higher rank,
but its first dimension must match the size of `condition`.}]>:$t,
    Arg<TF_Tensor, [{= A `Tensor` with the same type and shape as `x`.}]>:$e
  );

  let results = (outs
    Res<TF_Tensor, [{= A `Tensor` with the same type and shape as `x` and `y`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_SelectV2Op : TF_Op<"SelectV2", [NoSideEffect, ResultsBroadcastableShape]> {
  let summary = "";

  let arguments = (ins
    TF_BoolTensor:$condition,
    TF_Tensor:$t,
    TF_Tensor:$e
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;

  let builders = [
    OpBuilder<(ins "Value":$condition, "Value":$e, "Value":$t)>
  ];
}

def TF_SelfAdjointEigV2Op : TF_Op<"SelfAdjointEigV2", [NoSideEffect]> {
  let summary = [{
Computes the eigen decomposition of one or more square self-adjoint matrices.
  }];

  let description = [{
Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
are sorted in non-decreasing order.

```python
# a is a tensor.
# e is a tensor of eigenvalues.
# v is a tensor of eigenvectors.
e, v = self_adjoint_eig(a)
e = self_adjoint_eig(a, compute_v=False)
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{`Tensor` input of shape `[N, N]`.}]>:$input,

    DefaultValuedAttr<BoolAttr, "true">:$compute_v
  );

  let results = (outs
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Eigenvalues. Shape is `[N]`.}]>:$e,
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Eigenvectors. Shape is `[N, N]`.}]>:$v
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SeluOp : TF_Op<"Selu", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
  }];

  let description = [{
if `features < 0`, `scale * features` otherwise.

To be used together with
`initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
For correct dropout, use `tf.contrib.nn.alpha_dropout`.

See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
14231  }];
14232
14233  let arguments = (ins
14234    TF_FloatTensor:$features
14235  );
14236
14237  let results = (outs
14238    TF_FloatTensor:$activations
14239  );
14240
14241  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14242}
14243
14244def TF_SeluGradOp : TF_Op<"SeluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
14245  let summary = [{
14246Computes gradients for the scaled exponential linear (Selu) operation.
14247  }];
14248
14249  let arguments = (ins
14250    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding Selu operation.}]>:$gradients,
14251    Arg<TF_FloatTensor, [{The outputs of the corresponding Selu operation.}]>:$outputs
14252  );
14253
14254  let results = (outs
14255    Res<TF_FloatTensor, [{The gradients: `gradients * (outputs + scale * alpha)`
14256if outputs < 0, `scale * gradients` otherwise.}]>:$backprops
14257  );
14258
14259  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14260}
14261
14262def TF_SendOp : TF_Op<"Send", []> {
14263  let summary = "Sends the named tensor from send_device to recv_device.";
14264
14265  let arguments = (ins
14266    Arg<TF_Tensor, [{The tensor to send.}]>:$tensor,
14267
14268    StrAttr:$tensor_name,
14269    StrAttr:$send_device,
14270    I64Attr:$send_device_incarnation,
14271    StrAttr:$recv_device,
14272    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
14273  );
14274
14275  let results = (outs);
14276
14277  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14278}
14279
14280def TF_SendTPUEmbeddingGradientsOp : TF_Op<"SendTPUEmbeddingGradients", [AttrSizedOperandSegments, TF_TPUEmbeddingSideEffect]> {
14281  let summary = "Performs gradient updates of embedding tables.";
14282
14283  let arguments = (ins
14284    Arg<Variadic<TF_Float32Tensor>, [{A TensorList of gradients with which to update embedding tables.
14285This argument has the same length and shapes as the return value of
14286RecvTPUEmbeddingActivations, but contains gradients of the model's loss
14287with respect to the embedding activations. The embedding tables are updated
14288from these gradients via the optimizer specified in the TPU embedding
14289configuration given to tpu.initialize_system.}]>:$inputs,
14290    Arg<Variadic<TF_Float32Tensor>, [{A TensorList of float32 scalars, one for each dynamic learning
14291rate tag: see the comments in
14292//third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto.
14293Multiple tables can share the same dynamic learning rate tag as specified
14294in the configuration. If the learning rates for all tables are constant,
14295this list should be empty.}]>:$learning_rates,
14296
14297    StrAttr:$config
14298  );
14299
14300  let results = (outs);
14301
14302  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
14303  TF_DerivedOperandSizeAttr NN = TF_DerivedOperandSizeAttr<1>;
14304}
14305
14306def TF_SerializeIteratorOp : TF_Op<"SerializeIterator", []> {
14307  let summary = [{
14308Converts the given `resource_handle` representing an iterator to a variant tensor.
14309  }];
14310
14311  let arguments = (ins
14312    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorRead]>:$resource_handle,
14313
14314    DefaultValuedAttr<I64Attr, "0">:$external_state_policy
14315  );
14316
14317  let results = (outs
14318    Res<TF_VariantTensor, [{A variant tensor storing the state of the iterator contained in the
14319resource.}]>:$serialized
14320  );
14321}
14322
14323def TF_SerializeSparseOp : TF_Op<"SerializeSparse", [NoSideEffect]> {
14324  let summary = "Serialize a `SparseTensor` into a `[3]` `Tensor` object.";
14325
14326  let arguments = (ins
14327    Arg<TF_Int64Tensor, [{2-D.  The `indices` of the `SparseTensor`.}]>:$sparse_indices,
14328    Arg<TF_Tensor, [{1-D.  The `values` of the `SparseTensor`.}]>:$sparse_values,
14329    Arg<TF_Int64Tensor, [{1-D.  The `shape` of the `SparseTensor`.}]>:$sparse_shape
14330  );
14331
14332  let results = (outs
14333    TensorOf<[TF_Str, TF_Variant]>:$serialized_sparse
14334  );
14335
14336  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
14337  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
14338}
14339
14340def TF_ShapeOp : TF_Op<"Shape", [NoSideEffect]> {
14341  let summary = "Returns the shape of a tensor.";
14342
14343  let description = [{
14344This operation returns a 1-D integer tensor representing the shape of `input`.
14345
14346For example:
14347
14348```
14349# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
14350shape(t) ==> [2, 2, 3]
14351```
14352  }];
14353
14354  let arguments = (ins
14355    TF_Tensor:$input
14356  );
14357
14358  let results = (outs
14359    TF_I32OrI64Tensor:$output
14360  );
14361
14362  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14363  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
14364
14365  let verifier = [{
14366    return Verify(*this);
14367  }];
14368
14369  let builders = [
14370    OpBuilder<(ins "Value":$input, "BoolAttr":$use32Bit)>
14371  ];
14372
14373  let hasFolder = 1;
14374}
14375
14376def TF_ShapeNOp : TF_Op<"ShapeN", [NoSideEffect]> {
14377  let summary = "Returns shape of tensors.";
14378
14379  let description = [{
14380This operation returns N 1-D integer tensors representing shape of `input[i]s`.
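
For example, with hypothetical inputs `x` of shape `[2, 3]` and `y` of shape
`[5]` (an illustrative pseudo-call, in the notation used elsewhere in this file):

```
# 'x' has shape [2, 3]; 'y' has shape [5]
shape_n([x, y]) ==> [2, 3], [5]
```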
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$input
  );

  let results = (outs
    Variadic<TF_I32OrI64Tensor>:$output
  );

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasCanonicalizer = 1;
}

def TF_ShardedFilenameOp : TF_Op<"ShardedFilename", [NoSideEffect]> {
  let summary = [{
Generate a sharded filename. The filename is printf formatted as
  }];

  let description = [{
%s-%05d-of-%05d, basename, shard, num_shards.
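
For example, following that pattern, a hypothetical basename of `/tmp/ckpt`
with `shard = 3` and `num_shards = 100` yields:

```
/tmp/ckpt-00003-of-00100
```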
  }];

  let arguments = (ins
    TF_StrTensor:$basename,
    TF_Int32Tensor:$shard,
    TF_Int32Tensor:$num_shards
  );

  let results = (outs
    TF_StrTensor:$filename
  );
}

def TF_ShuffleAndRepeatDatasetV2Op : TF_Op<"ShuffleAndRepeatDatasetV2", []> {
  let summary = "";

  let arguments = (ins
    TF_VariantTensor:$input_dataset,
    TF_Int64Tensor:$buffer_size,
    TF_Int64Tensor:$seed,
    TF_Int64Tensor:$seed2,
    TF_Int64Tensor:$count,
    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorRead, TF_DatasetSeedGeneratorWrite]>:$seed_generator,

    DefaultValuedAttr<BoolAttr, "true">:$reshuffle_each_iteration,
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    TF_VariantTensor:$handle
  );
}

def TF_ShuffleDatasetV2Op : TF_Op<"ShuffleDatasetV2", []> {
  let summary = "";

  let arguments = (ins
    TF_VariantTensor:$input_dataset,
    TF_Int64Tensor:$buffer_size,
    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorRead, TF_DatasetSeedGeneratorWrite]>:$seed_generator,

    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    TF_VariantTensor:$handle
  );
}

def TF_ShuffleDatasetV3Op : TF_Op<"ShuffleDatasetV3", []> {
  let summary = "";

  let arguments = (ins
    TF_VariantTensor:$input_dataset,
    TF_Int64Tensor:$buffer_size,
    TF_Int64Tensor:$seed,
    TF_Int64Tensor:$seed2,
    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorRead, TF_DatasetSeedGeneratorWrite]>:$seed_generator,

    DefaultValuedAttr<BoolAttr, "true">:$reshuffle_each_iteration,
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    TF_VariantTensor:$handle
  );
}

def TF_ShutdownDistributedTPUOp : TF_Op<"ShutdownDistributedTPU", []> {
  let summary = "Shuts down a running distributed TPU system.";

  let description = [{
The op returns an error if no system is running.
  }];

  let arguments = (ins);

  let results = (outs);
}

def TF_SigmoidOp : TF_Op<"Sigmoid", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes sigmoid of `x` element-wise.";

  let description = [{
Specifically, `y = 1 / (1 + exp(-x))`.
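
For example (illustrative; values rounded to float32 precision):

```
x = [-1.0, 0.0, 1.0]
sigmoid(x) ==> [0.26894143, 0.5, 0.7310586]
```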
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SigmoidGradOp : TF_Op<"SigmoidGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the gradient of the sigmoid of `x` wrt its input.";

  let description = [{
Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
`dy` is the corresponding input gradient.
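
For example, at `x = 0` we have `y = sigmoid(0) = 0.5`, so
`grad = dy * 0.5 * (1 - 0.5) = 0.25 * dy`.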
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$y,
    TF_FpOrComplexTensor:$dy
  );

  let results = (outs
    TF_FpOrComplexTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SignOp : TF_Op<"Sign", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Returns an element-wise indication of the sign of a number.";

  let description = [{
`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.

For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

Example usage:
>>> tf.math.sign([0., 2., -3.])
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 0.,  1., -1.], dtype=float32)>
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SinOp : TF_Op<"Sin", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes sine of x element-wise.";

  let description = [{
Given an input tensor, this function computes sine of every
  element in the tensor. Input range is `(-inf, inf)` and
  output range is `[-1,1]`.

  ```python
  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")])
  tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SinhOp : TF_Op<"Sinh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes hyperbolic sine of x element-wise.";

  let description = [{
Given an input tensor, this function computes hyperbolic sine of every
  element in the tensor. Input range is `[-inf,inf]` and output range
  is `[-inf,inf]`.

  ```python
  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
  tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf]
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SizeOp : TF_Op<"Size", [NoSideEffect]> {
  let summary = "Returns the size of a tensor.";

  let description = [{
This operation returns an integer representing the number of elements in
`input`.

For example:

```
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
size(t) ==> 12
```
  }];

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    TF_I32OrI64Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasFolder = 1;
}

def TF_SliceOp : TF_Op<"Slice", [NoSideEffect, PredOpTrait<"input and output must have same element type", TCresVTEtIsSameAsOp<0, 0>>]> {
  let summary = "Return a slice from 'input'.";

  let description = [{
The output tensor is a tensor with dimensions described by 'size'
whose values are extracted from 'input' starting at the offsets in
'begin'.

*Requirements*:
  0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
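
For example (illustrative pseudo-call):

```
# 'input' is [[1, 2, 3],
#             [4, 5, 6]]
slice(input, begin=[0, 1], size=[2, 2]) ==> [[2, 3],
                                             [5, 6]]
```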
  }];

  let arguments = (ins
    TF_Tensor:$input,
    Arg<TF_I32OrI64Tensor, [{begin[i] specifies the offset into the 'i'th dimension of
'input' to slice from.}]>:$begin,
    Arg<TF_I32OrI64Tensor, [{size[i] specifies the number of elements of the 'i'th dimension
of 'input' to slice. If size[i] is -1, all remaining elements in dimension
i are included in the slice (i.e. this is equivalent to setting
size[i] = input.dim_size(i) - begin[i]).}]>:$size
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_SnapshotOp : TF_Op<"Snapshot", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Returns a copy of the input tensor.";

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SoftmaxOp : TF_Op<"Softmax", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes softmax activations.";

  let description = [{
For each batch `i` and class `j` we have

    $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
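
For example (illustrative; values rounded to float32 precision):

```
# 'logits' is [[0.0, 1.0]]
softmax(logits) ==> [[0.26894143, 0.7310586]]
```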
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{2-D with shape `[batch_size, num_classes]`.}]>:$logits
  );

  let results = (outs
    Res<TF_FloatTensor, [{Same shape as `logits`.}]>:$softmax
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_SoftmaxCrossEntropyWithLogitsOp : TF_Op<"SoftmaxCrossEntropyWithLogits", [NoSideEffect]> {
  let summary = [{
Computes softmax cross entropy cost and gradients to backpropagate.
  }];

  let description = [{
Inputs are the logits, not probabilities.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{batch_size x num_classes matrix}]>:$features,
    Arg<TF_FloatTensor, [{batch_size x num_classes matrix
The caller must ensure that each batch of labels represents a valid
probability distribution.}]>:$labels
  );

  let results = (outs
    Res<TF_FloatTensor, [{Per example loss (batch_size vector).}]>:$loss,
    Res<TF_FloatTensor, [{backpropagated gradients (batch_size x num_classes matrix).}]>:$backprop
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_SoftplusOp : TF_Op<"Softplus", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes softplus: `log(exp(features) + 1)`.";

  let arguments = (ins
    TF_FloatTensor:$features
  );

  let results = (outs
    TF_FloatTensor:$activations
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SoftplusGradOp : TF_Op<"SoftplusGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes softplus gradients for a softplus operation.";

  let arguments = (ins
    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding softplus operation.}]>:$gradients,
    Arg<TF_FloatTensor, [{The features passed as input to the corresponding softplus operation.}]>:$features
  );

  let results = (outs
    Res<TF_FloatTensor, [{The gradients: `gradients / (1 + exp(-features))`.}]>:$backprops
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SoftsignOp : TF_Op<"Softsign", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes softsign: `features / (abs(features) + 1)`.";

  let arguments = (ins
    TF_FloatTensor:$features
  );

  let results = (outs
    TF_FloatTensor:$activations
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SoftsignGradOp : TF_Op<"SoftsignGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes softsign gradients for a softsign operation.";

  let arguments = (ins
    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding softsign operation.}]>:$gradients,
    Arg<TF_FloatTensor, [{The features passed as input to the corresponding softsign operation.}]>:$features
  );

  let results = (outs
    Res<TF_FloatTensor, [{The gradients: `gradients / (1 + abs(features)) ** 2`.}]>:$backprops
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SpaceToBatchOp : TF_Op<"SpaceToBatch", [NoSideEffect]> {
  let summary = "SpaceToBatch for 4-D tensors of type T.";

  let description = [{
This is a legacy version of the more general SpaceToBatchND.

Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
More specifically, this op outputs a copy of the input tensor where values from
the `height` and `width` dimensions are moved to the `batch` dimension. After
the zero-padding, both `height` and `width` of the input must be divisible by the
block size.

The attr `block_size` must be greater than one. It indicates the block size.

  * Non-overlapping blocks of size `block_size x block_size` in the height and
    width dimensions are rearranged into the batch dimension at each location.
  * The batch of the output tensor is `batch * block_size * block_size`.
  * Both height_pad and width_pad must be divisible by block_size.

The shape of the output will be:

    [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
     depth]

Some examples:

(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:

```
x = [[[[1], [2]], [[3], [4]]]]
```

The output tensor has shape `[4, 1, 1, 1]` and value:

```
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:

```
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

The output tensor has shape `[4, 1, 1, 3]` and value:

```
[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
```

(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]],
      [[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```

The output tensor has shape `[4, 2, 2, 1]` and value:

```
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]]],
     [[[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```

The output tensor has shape `[8, 1, 2, 1]` and value:

```
x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
```

Among others, this operation is useful for reducing atrous convolution into
regular convolution.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{4-D with shape `[batch, height, width, depth]`.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
  the padding of the input with zeros across the spatial dimensions as follows:

      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]

  The effective spatial dimensions of the zero-padded input tensor will be:

      height_pad = pad_top + height + pad_bottom
      width_pad = pad_left + width + pad_right}]>:$paddings,

    Confined<I64Attr, [IntMinValue<2>]>:$block_size
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
}

def TF_SpaceToBatchNDOp : TF_Op<"SpaceToBatchND", [NoSideEffect]> {
  let summary = "SpaceToBatch for N-D tensors of type T.";

  let description = [{
This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
grid of blocks of shape `block_shape`, and interleaves these blocks with the
"batch" dimension (0) such that in the output, the spatial dimensions
`[1, ..., M]` correspond to the position within the grid, and the batch
dimension combines both the position within a spatial block and the original
batch position.  Prior to division into blocks, the spatial dimensions of the
input are optionally zero padded according to `paddings`. See below for a
precise description.

This operation is equivalent to the following steps:

1. Zero-pad the start and end of dimensions `[1, ..., M]` of the
   input according to `paddings` to produce `padded` of shape `padded_shape`.

2. Reshape `padded` to `reshaped_padded` of shape:

     [batch] +
     [padded_shape[1] / block_shape[0],
       block_shape[0],
      ...,
      padded_shape[M] / block_shape[M-1],
      block_shape[M-1]] +
     remaining_shape

3. Permute dimensions of `reshaped_padded` to produce
   `permuted_reshaped_padded` of shape:

     block_shape +
     [batch] +
     [padded_shape[1] / block_shape[0],
      ...,
      padded_shape[M] / block_shape[M-1]] +
     remaining_shape

4. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
   dimension, producing an output tensor of shape:

     [batch * prod(block_shape)] +
     [padded_shape[1] / block_shape[0],
      ...,
      padded_shape[M] / block_shape[M-1]] +
     remaining_shape

Some examples:

(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
    `paddings = [[0, 0], [0, 0]]`:

```
x = [[[[1], [2]], [[3], [4]]]]
```

The output tensor has shape `[4, 1, 1, 1]` and value:

```
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
    `paddings = [[0, 0], [0, 0]]`:

```
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

The output tensor has shape `[4, 1, 1, 3]` and value:

```
[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
```

(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
    `paddings = [[0, 0], [0, 0]]`:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]],
      [[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```

The output tensor has shape `[4, 2, 2, 1]` and value:

```
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
    paddings = `[[0, 0], [2, 0]]`:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]]],
     [[[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```

The output tensor has shape `[8, 1, 3, 1]` and value:

```
x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
     [[[0], [2], [4]]], [[[0], [10], [12]]],
     [[[0], [5], [7]]], [[[0], [13], [15]]],
     [[[0], [6], [8]]], [[[0], [14], [16]]]]
```

Among others, this operation is useful for reducing atrous convolution into
regular convolution.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
where spatial_shape has `M` dimensions.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{1-D with shape `[M]`, all values must be >= 1.}]>:$block_shape,
    Arg<TF_I32OrI64Tensor, [{2-D with shape `[M, 2]`, all values must be >= 0.
  `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
  `i + 1`, which corresponds to spatial dimension `i`.  It is required that
  `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.}]>:$paddings
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tblock_shape = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<2>;

  let verifier = [{ return Verify(*this); }];

  let extraClassDeclaration = [{
    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
      return ArraysAreCastCompatible(l, r);
    }
  }];
}

def TF_SpaceToDepthOp : TF_Op<"SpaceToDepth", [NoSideEffect]> {
  let summary = "SpaceToDepth for tensors of type T.";

  let description = [{
Rearranges blocks of spatial data, into depth. More specifically,
this op outputs a copy of the input tensor where values from the `height`
and `width` dimensions are moved to the `depth` dimension.
The attr `block_size` indicates the input block size.

  * Non-overlapping blocks of size `block_size x block_size` are rearranged
    into depth at each location.
  * The depth of the output tensor is `block_size * block_size * input_depth`.
  * The Y, X coordinates within each block of the input become the high order
    component of the output channel index.
  * The input tensor's height and width must be divisible by block_size.

The `data_format` attr specifies the layout of the input and output tensors
with the following options:
  "NHWC": `[ batch, height, width, channels ]`
  "NCHW": `[ batch, channels, height, width ]`
  "NCHW_VECT_C":
      `qint8 [ batch, channels / 4, height, width, 4 ]`

It is useful to consider the operation as transforming a 6-D Tensor.
e.g. for data_format = NHWC,
     Each element in the input tensor can be specified via 6 coordinates,
     ordered by decreasing memory layout significance as:
     n,oY,bY,oX,bX,iC  (where n=batch index, oX, oY means X or Y coordinates
                        within the output image, bX, bY means coordinates
                        within the input block, iC means input channels).
     The output would be a transpose to the following layout:
     n,oY,oX,bY,bX,iC

This operation is useful for resizing the activations between convolutions
(but keeping all data), e.g. instead of pooling. It is also useful for training
purely convolutional models.

For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
block_size = 2:

```
x = [[[[1], [2]],
      [[3], [4]]]]
```

This operation will output a tensor of shape `[1, 1, 1, 4]`:

```
[[[[1, 2, 3, 4]]]]
```

Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
the corresponding output will have a single element (i.e. width and height are
both 1) and will have a depth of 4 channels (1 * block_size * block_size).
The output element shape is `[1, 1, 4]`.

For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.

```
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

This operation, for block_size of 2, will return the following tensor of shape
`[1, 1, 1, 12]`

```
[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
```

Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:

```
x = [[[[1],   [2],  [5],  [6]],
      [[3],   [4],  [7],  [8]],
      [[9],  [10], [13],  [14]],
      [[11], [12], [15],  [16]]]]
```

the operator will return the following tensor of shape `[1 2 2 4]`:

```
x = [[[[1, 2, 3, 4],
       [5, 6, 7, 8]],
      [[9, 10, 11, 12],
       [13, 14, 15, 16]]]]
```
  }];

  let arguments = (ins
    TF_Tensor:$input,

    Confined<I64Attr, [IntMinValue<2>]>:$block_size,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SparseAddOp : TF_Op<"SparseAdd", [NoSideEffect]> {
  let summary = [{
Adds two `SparseTensor` objects to produce another `SparseTensor`.
  }];

  let description = [{
The input `SparseTensor` objects' indices are assumed ordered in standard
lexicographic order.  If this is not the case, before this step run
`SparseReorder` to restore index ordering.

By default, if two values sum to zero at some index, the output `SparseTensor`
would still include that particular location in its index, storing a zero in the
corresponding value slot.  To override this, callers can specify `thresh`,
indicating that if the sum has a magnitude strictly smaller than `thresh`, its
corresponding value and index would then not be included.  In particular,
`thresh == 0` (default) means everything is kept and actual thresholding happens
only for a positive value.
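
For example, if `a` and `b` overlap at index `[0, 0]` with values `0.2` and
`-0.3`, the sum at that index is `-0.1`; with `thresh = 0.2` that entry is
dropped from the output, while the default `thresh = 0` keeps it.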

In the following shapes, `nnz` is the count after taking `thresh` into account.
  }];

  let arguments = (ins
    Arg<TF_Int64Tensor, [{2-D.  The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.}]>:$a_indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D.  The `values` of the first `SparseTensor`, size `[nnz]` Vector.}]>:$a_values,
    Arg<TF_Int64Tensor, [{1-D.  The `shape` of the first `SparseTensor`, size `[ndims]` Vector.}]>:$a_shape,
    Arg<TF_Int64Tensor, [{2-D.  The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.}]>:$b_indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D.  The `values` of the second `SparseTensor`, size `[nnz]` Vector.}]>:$b_values,
    Arg<TF_Int64Tensor, [{1-D.  The `shape` of the second `SparseTensor`, size `[ndims]` Vector.}]>:$b_shape,
    Arg<TF_IntOrFpTensor, [{0-D.  The magnitude threshold that determines if an output value/index
pair takes space.}]>:$thresh
  );

  let results = (outs
    TF_Int64Tensor:$sum_indices,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$sum_values,
    TF_Int64Tensor:$sum_shape
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<6>;
}

def TF_SparseFillEmptyRowsOp : TF_Op<"SparseFillEmptyRows", [NoSideEffect]> {
  let summary = [{
Fills empty rows in the input 2-D `SparseTensor` with a default value.
  }];

  let description = [{
The input `SparseTensor` is represented via the tuple of inputs
(`indices`, `values`, `dense_shape`).  The output `SparseTensor` has the
same `dense_shape` but with indices `output_indices` and values
`output_values`.

This op inserts a single entry for every row that doesn't have any values.
The index is created as `[row, 0, ..., 0]` and the inserted value
is `default_value`.

For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:

    [0, 1]: a
    [0, 3]: b
    [2, 0]: c
    [3, 1]: d

Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:

    [0, 1]: a
    [0, 3]: b
    [1, 0]: default_value
    [2, 0]: c
    [3, 1]: d
    [4, 0]: default_value

The output `SparseTensor` will be in row-major order and will have the
same shape as the input.

This op also returns an indicator vector shaped `[dense_shape[0]]` such that

    empty_row_indicator[i] = True iff row i was an empty row.

And a reverse index map vector shaped `[indices.shape[0]]` that is used during
backpropagation,

    reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
  }];

  let arguments = (ins
    Arg<TF_Int64Tensor, [{2-D. the indices of the sparse tensor.}]>:$indices,
    Arg<TF_Tensor, [{1-D. the values of the sparse tensor.}]>:$values,
    Arg<TF_Int64Tensor, [{1-D. the shape of the sparse tensor.}]>:$dense_shape,
    Arg<TF_Tensor, [{0-D. default value to insert into location `[row, 0, ..., 0]`
  for rows missing from the input sparse tensor.
output indices: 2-D. the indices of the filled sparse tensor.}]>:$default_value
  );

  let results = (outs
    TF_Int64Tensor:$output_indices,
    Res<TF_Tensor, [{1-D. the values of the filled sparse tensor.}]>:$output_values,
    Res<TF_BoolTensor, [{1-D. whether the dense row was missing in the
input sparse tensor.}]>:$empty_row_indicator,
    Res<TF_Int64Tensor, [{1-D. a map from the input indices to the output indices.}]>:$reverse_index_map
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_SparseMatMulOp : TF_Op<"SparseMatMul", [NoSideEffect]> {
  let summary = [{
Multiply matrix "a" by matrix "b".
  }];

  let description = [{
The inputs must be two-dimensional matrices and the inner dimension of "a" must
match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not
`SparseTensor`s.  This op is optimized for the case where at least one of "a" or
"b" is sparse, in the sense that they have a large proportion of zero values.
The breakeven for using this versus a dense matrix multiply on one platform was
30% zero values in the sparse matrix.

The gradient computation of this operation will only take advantage of sparsity
in the input gradient when that gradient comes from a Relu.
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float32]>:$a,
    TensorOf<[TF_Bfloat16, TF_Float32]>:$b,

    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
    DefaultValuedAttr<BoolAttr, "false">:$transpose_b,
    DefaultValuedAttr<BoolAttr, "false">:$a_is_sparse,
    DefaultValuedAttr<BoolAttr, "false">:$b_is_sparse
  );

  let results = (outs
    TF_Float32Tensor:$product
  );

  TF_DerivedOperandTypeAttr Ta = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tb = TF_DerivedOperandTypeAttr<1>;
}

def TF_SparseReduceSumOp : TF_Op<"SparseReduceSum", [NoSideEffect]> {
  let summary = [{
Computes the sum of elements across dimensions of a SparseTensor.
  }];

  let description = [{
This Op takes a SparseTensor and is the sparse counterpart to
`tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
instead of a sparse one.

Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
with length 1.

If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
with a single element is returned.  Additionally, the axes can be negative,
which are interpreted according to the indexing rules in Python.
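
An illustrative sketch (pseudo-call; `?` marks an implicit zero):

```
# 'x' represents the dense tensor [[1, ?, 1],
#                                  [?, 1, ?]]
sparse_reduce_sum(x) ==> 3
sparse_reduce_sum(x, reduction_axes=[1]) ==> [2, 1]
sparse_reduce_sum(x, reduction_axes=[1], keep_dims=True) ==> [[2], [1]]
```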
  }];

  let arguments = (ins
    Arg<TF_Int64Tensor, [{2-D.  `N x R` matrix with the indices of non-empty values in a
SparseTensor, possibly not in canonical ordering.}]>:$input_indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D.  `N` non-empty values corresponding to `input_indices`.}]>:$input_values,
    Arg<TF_Int64Tensor, [{1-D.  Shape of the input SparseTensor.}]>:$input_shape,
    Arg<TF_Int32Tensor, [{1-D.  Length-`K` vector containing the reduction axes.}]>:$reduction_axes,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{`R-K`-D.  The reduced Tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_SparseReshapeOp : TF_Op<"SparseReshape", [NoSideEffect]> {
  let summary = [{
Reshapes a SparseTensor to represent values in a new dense shape.
  }];

  let description = [{
This operation has the same semantics as reshape on the represented dense
tensor.  The `input_indices` are recomputed based on the requested `new_shape`.

If one component of `new_shape` is the special value -1, the size of that
dimension is computed so that the total dense size remains constant.  At
most one component of `new_shape` can be -1.  The number of dense elements
implied by `new_shape` must be the same as the number of dense elements
originally implied by `input_shape`.

Reshaping does not affect the order of values in the SparseTensor.

If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
has length `R_out`, then `input_indices` has shape `[N, R_in]`,
`input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
`output_shape` has length `R_out`.
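
For example, reshaping a `[2, 3]` sparse tensor to `[3, -1]`:

```
# input_shape = [2, 3], input_indices = [[0, 1], [1, 2]]
# (flattened element positions 1 and 5)
# new_shape = [3, -1] implies output_shape = [3, 2], so
# output_indices = [[0, 1], [2, 1]]
```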
  }];

  let arguments = (ins
    Arg<TF_Int64Tensor, [{2-D.  `N x R_in` matrix with the indices of non-empty values in a
SparseTensor.}]>:$input_indices,
    Arg<TF_Int64Tensor, [{1-D.  `R_in` vector with the input SparseTensor's dense shape.}]>:$input_shape,
    Arg<TF_Int64Tensor, [{1-D.  `R_out` vector with the requested new dense shape.}]>:$new_shape
  );

  let results = (outs
    Res<TF_Int64Tensor, [{2-D.  `N x R_out` matrix with the updated indices of non-empty
values in the output SparseTensor.}]>:$output_indices,
    Res<TF_Int64Tensor, [{1-D.  `R_out` vector with the full dense shape of the output
SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
filled in.}]>:$output_shape
  );
}

def TF_SparseSegmentMeanOp : TF_Op<"SparseSegmentMean", [NoSideEffect]> {
  let summary = "Computes the mean along sparse segments of a tensor.";

  let description = [{
See `tf.sparse.segment_sum` for usage examples.

Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.
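
An illustrative sketch (pseudo-call):

```
c = [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]
# Select rows 0 and 1 and average them into a single segment.
sparse_segment_mean(c, indices=[0, 1], segment_ids=[0, 0]) ==> [[2.0, 3.0]]
```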
  }];

  let arguments = (ins
    TF_FloatTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TF_FloatTensor, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
}

def TF_SparseSegmentMeanGradOp : TF_Op<"SparseSegmentMeanGrad", [NoSideEffect]> {
  let summary = "Computes gradients for SparseSegmentMean.";

  let description = [{
Returns tensor "output" with same shape as grad, except for dimension 0 whose
value is output_dim0.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{gradient propagated to the SparseSegmentMean op.}]>:$grad,
    Arg<TF_I32OrI64Tensor, [{indices passed to the corresponding SparseSegmentMean op.}]>:$indices,
    Arg<TF_I32OrI64Tensor, [{segment_ids passed to the corresponding SparseSegmentMean op.}]>:$segment_ids,
    Arg<TF_Int32Tensor, [{dimension 0 of "data" passed to SparseSegmentMean op.}]>:$output_dim0
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
}

def TF_SparseSegmentMeanWithNumSegmentsOp : TF_Op<"SparseSegmentMeanWithNumSegments", [NoSideEffect]> {
  let summary = "Computes the mean along sparse segments of a tensor.";

  let description = [{
Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
missing, the `output` tensor at that position will be zeroed.

Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.
  }];

  let arguments = (ins
    TF_FloatTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids,
    Arg<TF_I32OrI64Tensor, [{Should equal the number of distinct segment IDs.}]>:$num_segments
  );

  let results = (outs
    Res<TF_FloatTensor, [{Has same shape as data, except for dimension 0 which has size
`num_segments`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<3>;
  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
}

def TF_SparseSegmentSqrtNOp : TF_Op<"SparseSegmentSqrtN", [NoSideEffect]> {
  let summary = [{
Computes the sum along sparse segments of a tensor divided by the sqrt of N.
  }];

  let description = [{
N is the size of the segment being reduced.

See `tf.sparse.segment_sum` for usage examples.
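
For example, with rows `[1, 2]` and `[3, 4]` gathered into a single segment
(`N = 2`), the per-segment sums `[4, 6]` are divided by `sqrt(2)`, giving
approximately `[2.828, 4.243]`.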
  }];

  let arguments = (ins
    TF_FloatTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TF_FloatTensor, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
}

def TF_SparseSegmentSqrtNGradOp : TF_Op<"SparseSegmentSqrtNGrad", [NoSideEffect]> {
  let summary = "Computes gradients for SparseSegmentSqrtN.";

  let description = [{
Returns tensor "output" with same shape as grad, except for dimension 0 whose
value is output_dim0.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{gradient propagated to the SparseSegmentSqrtN op.}]>:$grad,
    Arg<TF_I32OrI64Tensor, [{indices passed to the corresponding SparseSegmentSqrtN op.}]>:$indices,
    Arg<TF_I32OrI64Tensor, [{segment_ids passed to the corresponding SparseSegmentSqrtN op.}]>:$segment_ids,
    Arg<TF_Int32Tensor, [{dimension 0 of "data" passed to SparseSegmentSqrtN op.}]>:$output_dim0
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
}

def TF_SparseSegmentSqrtNWithNumSegmentsOp : TF_Op<"SparseSegmentSqrtNWithNumSegments", [NoSideEffect]> {
  let summary = [{
Computes the sum along sparse segments of a tensor divided by the sqrt of N.
  }];

  let description = [{
N is the size of the segment being reduced.

Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
missing, the `output` tensor at that position will be zeroed.

Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.
  }];

  let arguments = (ins
    TF_FloatTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids,
    Arg<TF_I32OrI64Tensor, [{Should equal the number of distinct segment IDs.}]>:$num_segments
  );

  let results = (outs
    Res<TF_FloatTensor, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<3>;
  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
}

def TF_SparseSegmentSumOp : TF_Op<"SparseSegmentSum", [NoSideEffect]> {
  let summary = "Computes the sum along sparse segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.

For example:

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

# Select two rows, one segment.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]

# Select two rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1  2  3  4]
#     [-1 -2 -3 -4]]

# Select all rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
#     [5 6 7 8]]

# Which is equivalent to:
tf.segment_sum(c, tf.constant([0, 0, 1]))
```
  }];

  let arguments = (ins
    TF_IntOrFpTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
}

def TF_SparseSoftmaxCrossEntropyWithLogitsOp : TF_Op<"SparseSoftmaxCrossEntropyWithLogits", [NoSideEffect]> {
  let summary = [{
Computes softmax cross entropy cost and gradients to backpropagate.
  }];

  let description = [{
Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
a matrix of label probabilities, but rather a single label per row
of features.  This label is considered to have probability 1.0 for the
given row.

Inputs are the logits, not probabilities.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{batch_size x num_classes matrix}]>:$features,
    Arg<TF_I32OrI64Tensor, [{batch_size vector with values in [0, num_classes).
This is the label for the given minibatch entry.}]>:$labels
  );

  let results = (outs
    Res<TF_FloatTensor, [{Per example loss (batch_size vector).}]>:$loss,
    Res<TF_FloatTensor, [{backpropagated gradients (batch_size x num_classes matrix).}]>:$backprop
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tlabels = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{ return Verify(*this); }];
}

def TF_SparseToDenseOp : TF_Op<"SparseToDense", [NoSideEffect]> {
  let summary = "Converts a sparse representation into a dense tensor.";

  let description = [{
Builds an array `dense` with shape `output_shape` such that

```
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)

# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]

# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```

All other values in `dense` are set to `default_value`.  If `sparse_values` is a
scalar, all sparse indices are set to this single value.

Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If `validate_indices` is true, these properties
are checked during execution.
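
For example:

```
# sparse_indices = [[1], [3]], output_shape = [4],
# sparse_values = [5, 7], default_value = 0
dense ==> [0, 5, 0, 7]
```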
15642  }];
15643
15644  let arguments = (ins
15645    Arg<TF_I32OrI64Tensor, [{0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
15646index where `sparse_values[i]` will be placed.}]>:$sparse_indices,
15647    Arg<TF_I32OrI64Tensor, [{1-D.  Shape of the dense output tensor.}]>:$output_shape,
15648    Arg<TF_Tensor, [{1-D.  Values corresponding to each row of `sparse_indices`,
15649or a scalar value to be used for all sparse indices.}]>:$sparse_values,
15650    Arg<TF_Tensor, [{Scalar value to set for indices not specified in
15651`sparse_indices`.}]>:$default_value,
15652
15653    DefaultValuedAttr<BoolAttr, "true">:$validate_indices
15654  );
15655
15656  let results = (outs
15657    Res<TF_Tensor, [{Dense output tensor of shape `output_shape`.}]>:$dense
15658  );
15659
15660  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
15661  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<0>;
15662}
15663
15664def TF_SplitOp : TF_Op<"Split", [NoSideEffect]> {
15665  let summary = "Splits a tensor into `num_split` tensors along one dimension.";
15666
15667  let arguments = (ins
15668    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to split.  Must be in the range
15669`[-rank(value), rank(value))`.}]>:$split_dim,
15670    Arg<TF_Tensor, [{The tensor to split.}]>:$value
15671  );
15672
15673  let results = (outs
15674    Res<Variadic<TF_Tensor>, [{They are identically shaped tensors, whose shape matches that of `value`
15675except along `axis`, where their sizes are
15676`values.shape[split_dim] / num_split`.}]>:$output
15677  );
15678
15679  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
15680  TF_DerivedResultSizeAttr num_split = TF_DerivedResultSizeAttr<0>;
15681
15682  let verifier = [{ return Verify(*this); }];
15683}
15684
15685def TF_SplitVOp : TF_Op<"SplitV", [NoSideEffect]> {
15686  let summary = "Splits a tensor into `num_split` tensors along one dimension.";
15687
15688  let arguments = (ins
15689    Arg<TF_Tensor, [{The tensor to split.}]>:$value,
15690    Arg<TF_I32OrI64Tensor, [{list containing the sizes of each output tensor along the split
15691dimension. Must sum to the dimension of value along split_dim.
15692Can contain one -1 indicating that dimension is to be inferred.}]>:$size_splits,
15693    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to split.  Must be in the range
15694`[-rank(value), rank(value))`.}]>:$split_dim
15695  );
15696
15697  let results = (outs
15698    Res<Variadic<TF_Tensor>, [{Tensors whose shape matches that of `value`
15699except along `axis`, where their sizes are
15700`size_splits[i]`.}]>:$output
15701  );
15702
15703  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15704  TF_DerivedOperandTypeAttr Tlen = TF_DerivedOperandTypeAttr<1>;
15705  TF_DerivedResultSizeAttr num_split = TF_DerivedResultSizeAttr<0>;
15706
15707  let verifier = [{ return Verify(*this); }];
15708}
15709
15710def TF_SqrtOp : TF_Op<"Sqrt", [NoSideEffect, SameOperandsAndResultType]> {
15711  let summary = "Computes square root of x element-wise.";
15712
15713  let description = [{
15714I.e., \\(y = \sqrt{x} = x^{1/2}\\).
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SqrtGradOp : TF_Op<"SqrtGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the gradient for the sqrt of `x` wrt its input.";

  let description = [{
Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
is the corresponding input gradient.
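
For example, if `x = 4.0` then `y = 2.0`, so `grad = dy * 0.5 / 2.0 = 0.25 * dy`.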
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$y,
    TF_FpOrComplexTensor:$dy
  );

  let results = (outs
    TF_FpOrComplexTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SquareOp : TF_Op<"Square", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes square of x element-wise.";

  let description = [{
I.e., \\(y = x * x = x^2\\).
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_SquaredDifferenceOp : TF_Op<"SquaredDifference", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
                             WithBroadcastableBinOpBuilder {
  let summary = "Returns conj(x - y)(x - y) element-wise.";

  let description = [{
*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
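
For real inputs this is simply `(x - y) * (x - y)`. For example:

>>> tf.math.squared_difference([1., 2.], [3., 1.]).numpy()
array([4., 1.], dtype=float32)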
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$y
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SqueezeOp : TF_Op<"Squeeze", [NoSideEffect]> {
  let summary = "Removes dimensions of size 1 from the shape of a tensor.";

  let description = [{
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.

For example:

```
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t)) ==> [2, 3]
```

Or, to remove specific size 1 dimensions:

```
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
```
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{The `input` to squeeze.}]>:$input,

    DefaultValuedAttr<I64ArrayAttr, "{}">:$squeeze_dims
  );

  let results = (outs
    Res<TF_Tensor, [{Contains the same data as `input`, but has one or more dimensions of
size 1 removed.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_StackCloseV2Op : TF_Op<"StackCloseV2", []> {
  let summary = "Delete the stack from its resource container.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackFree]>:$handle
  );

  let results = (outs);
}

def TF_StackPopV2Op : TF_Op<"StackPopV2", []> {
  let summary = "Pop the element at the top of the stack.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackRead, TF_StackWrite]>:$handle
  );

  let results = (outs
    Res<TF_Tensor, [{The tensor that is popped from the top of the stack.}]>:$elem
  );

  TF_DerivedResultTypeAttr elem_type = TF_DerivedResultTypeAttr<0>;
}

def TF_StackPushV2Op : TF_Op<"StackPushV2", []> {
  let summary = "Push an element onto the stack.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackRead, TF_StackWrite]>:$handle,
    Arg<TF_Tensor, [{The tensor to be pushed onto the stack.}]>:$elem,

    DefaultValuedAttr<BoolAttr, "false">:$swap_memory
  );

  let results = (outs
    Res<TF_Tensor, [{The same tensor as the input 'elem'.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_StackV2Op : TF_Op<"StackV2", [TF_UniqueResourceAllocation]> {
  let summary = "A stack that produces elements in first-in last-out order.";

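  let description = [{
A sketch of the allocate/push/pop/close flow through the `tf.raw_ops`
bindings for the stack ops defined in this file:

```python
h = tf.raw_ops.StackV2(max_size=10, elem_type=tf.float32)
tf.raw_ops.StackPushV2(handle=h, elem=tf.constant(1.0))
top = tf.raw_ops.StackPopV2(handle=h, elem_type=tf.float32)  # 1.0
tf.raw_ops.StackCloseV2(handle=h)
```
  }];
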
  let arguments = (ins
    Arg<TF_Int32Tensor, [{The maximum size of the stack if non-negative. If negative, the stack
size is unlimited.}]>:$max_size,

    TypeAttr:$elem_type,
    DefaultValuedAttr<StrAttr, "">:$stack_name
  );

  let results = (outs
    Res<TF_ResourceTensor, [{The handle to the stack.}], [TF_StackAlloc]>:$handle
  );
}

def TF_StatefulStandardNormalV2Op : TF_Op<"StatefulStandardNormalV2", []> {
  let summary = "Outputs random values from a normal distribution.";

  let description = [{
The generated values will have mean 0 and standard deviation 1.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle of the resource variable that stores the state of the RNG.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
    Arg<TF_Int64Tensor, [{The RNG algorithm.}]>:$algorithm,
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape
  );

  let results = (outs
    Res<TF_FloatTensor, [{A tensor of the specified shape filled with random normal values.}]>:$output
  );

  TF_DerivedOperandTypeAttr shape_dtype = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatefulTruncatedNormalOp : TF_Op<"StatefulTruncatedNormal", []> {
  let summary = "Outputs random values from a truncated normal distribution.";

  let description = [{
The generated values follow a normal distribution with mean 0 and standard
deviation 1, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle of the resource variable that stores the state of the RNG.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
    Arg<TF_Int64Tensor, [{The RNG algorithm.}]>:$algorithm,
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr shape_dtype = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatefulUniformOp : TF_Op<"StatefulUniform", []> {
  let summary = "Outputs random values from a uniform distribution.";

  let description = [{
The generated values follow a uniform distribution in the range `[0, 1)`. The
lower bound 0 is included in the range, while the upper bound 1 is excluded.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle of the resource variable that stores the state of the RNG.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
    Arg<TF_Int64Tensor, [{The RNG algorithm.}]>:$algorithm,
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr shape_dtype = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessMultinomialOp : TF_Op<"StatelessMultinomial", [NoSideEffect, TF_NoConstantFold]> {
  let summary = "Draws samples from a multinomial distribution.";

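  let description = [{
A sketch using the Python API (`tf.random.stateless_categorical` lowers to
this op):

```python
# 2 batches of unnormalized log-probabilities over 5 classes.
logits = tf.math.log([[0.1] * 5, [0.9] + [0.025] * 4])
# Draw 10 samples per batch row; result has shape [2, 10].
samples = tf.random.stateless_categorical(logits, num_samples=10, seed=[7, 17])
```
  }];
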
  let arguments = (ins
    Arg<TF_IntOrFpTensor, [{2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
represents the unnormalized log probabilities for all classes.}]>:$logits,
    Arg<TF_Int32Tensor, [{0-D.  Number of independent samples to draw for each row slice.}]>:$num_samples,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_I32OrI64Tensor, [{2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
contains the drawn class labels with range `[0, num_classes)`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr output_dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessParameterizedTruncatedNormalOp : TF_Op<"StatelessParameterizedTruncatedNormal", [NoSideEffect, TF_NoConstantFold]> {
  let summary = "";

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The mean parameter of each batch.}]>:$means,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The standard deviation parameter of each batch. Must be greater than 0.}]>:$stddevs,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The minimum cutoff. May be -infinity.}]>:$minvals,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The maximum cutoff. May be +infinity, and must be more than the minval
for each batch.}]>:$maxvals
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The outputs are truncated normal samples and are a deterministic function of
`shape`, `seed`, `minvals`, `maxvals`, `means` and `stddevs`.}]>:$output
  );

  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
}

def TF_StatelessRandomBinomialOp : TF_Op<"StatelessRandomBinomial", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom numbers from a binomial distribution.
  }];

  let description = [{
Outputs random values from a binomial distribution.

The outputs are a deterministic function of `shape`, `seed`, `counts`, and `probs`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The counts of the binomial distribution. Must be broadcastable with `probs`,
and broadcastable with the rightmost dimensions of `shape`.}]>:$counts,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The probability of success for the binomial distribution. Must be broadcastable
with `counts` and broadcastable with the rightmost dimensions of `shape`.}]>:$probs
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomGammaV2Op : TF_Op<"StatelessRandomGammaV2", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom numbers from a gamma distribution.
  }];

  let description = [{
Outputs random values from a gamma distribution.

The outputs are a deterministic function of `shape`, `seed`, and `alpha`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The concentration of the gamma distribution. Shape must match the rightmost
dimensions of `shape`.}]>:$alpha
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
}

def TF_StatelessRandomGetAlgOp : TF_Op<"StatelessRandomGetAlg", []> {
  let summary = "Picks the best counter-based RNG algorithm based on device.";

  let description = [{
This op picks the best counter-based RNG algorithm based on device.
  }];

  let arguments = (ins);

  let results = (outs
    Res<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );
}

def TF_StatelessRandomGetKeyCounterOp : TF_Op<"StatelessRandomGetKeyCounter", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Scrambles seed into key and counter, using the best algorithm based on device.
  }];

  let description = [{
This op scrambles a shape-[2] seed into a key and a counter, both needed by counter-based RNG algorithms. The scrambling uses the best algorithm based on device. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
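
A sketch of deriving a key/counter pair through the `tf.raw_ops` binding:

```python
key, counter = tf.raw_ops.StatelessRandomGetKeyCounter(seed=[7, 17])
# key has shape uint64[1]; counter is right-padded to uint64[2].
```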
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Res<TF_Uint64Tensor, [{Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).}]>:$counter
  );

  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<0>;
}

def TF_StatelessRandomGetKeyCounterAlgOp : TF_Op<"StatelessRandomGetKeyCounterAlg", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Picks the best algorithm based on device, and scrambles seed into key and counter.
  }];

  let description = [{
This op picks the best counter-based RNG algorithm based on device, and scrambles a shape-[2] seed into a key and a counter, both needed by the counter-based algorithm. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Res<TF_Uint64Tensor, [{Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).}]>:$counter,
    Res<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );

  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<0>;
}

def TF_StatelessRandomNormalOp : TF_Op<"StatelessRandomNormal", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom values from a normal distribution.
  }];

  let description = [{
The generated values will have mean 0 and standard deviation 1.

The outputs are a deterministic function of `shape` and `seed`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomNormalV2Op : TF_Op<"StatelessRandomNormalV2", [NoSideEffect]> {
  let summary = [{
Outputs deterministic pseudorandom values from a normal distribution.
  }];

  let description = [{
The generated values will have mean 0 and standard deviation 1.

The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
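
A sketch of the full key/counter flow through the `tf.raw_ops` bindings:

```python
key, counter = tf.raw_ops.StatelessRandomGetKeyCounter(seed=[7, 17])
alg = tf.raw_ops.StatelessRandomGetAlg()
out = tf.raw_ops.StatelessRandomNormalV2(
    shape=[2, 3], key=key, counter=counter, alg=alg)
```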
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomPoissonOp : TF_Op<"StatelessRandomPoisson", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom numbers from a Poisson distribution.
  }];

  let description = [{
Outputs random values from a Poisson distribution.

The outputs are a deterministic function of `shape`, `seed`, and `lam`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The rate of the Poisson distribution. Shape must match the rightmost dimensions
of `shape`.}]>:$lam
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Rtype = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomUniformOp : TF_Op<"StatelessRandomUniform", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom values from a uniform distribution.
  }];

  let description = [{
The generated values follow a uniform distribution in the range `[0, 1)`. The
lower bound 0 is included in the range, while the upper bound 1 is excluded.

The outputs are a deterministic function of `shape` and `seed`.
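
For example, repeated calls with the same seed return identical values (a
sketch via `tf.random.stateless_uniform`, which lowers to this op):

```python
a = tf.random.stateless_uniform([2, 3], seed=[1, 2])
b = tf.random.stateless_uniform([2, 3], seed=[1, 2])
# tf.reduce_all(a == b) is True.
```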
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomUniformFullIntOp : TF_Op<"StatelessRandomUniformFullInt", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
  }];

  let description = [{
The generated values are uniform integers covering the whole range of `dtype`.

The outputs are a deterministic function of `shape` and `seed`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomUniformFullIntV2Op : TF_Op<"StatelessRandomUniformFullIntV2", [NoSideEffect]> {
  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
  }];

  let description = [{
The generated values are uniform integers covering the whole range of `dtype`.

The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );

  let results = (outs
    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomUniformIntOp : TF_Op<"StatelessRandomUniformInt", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
  }];

  let description = [{
The generated values follow a uniform distribution in the range `[minval, maxval)`.

The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.
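
A sketch using the Python API, which lowers to this op for integer dtypes
(both `minval` and `maxval` must then be given):

```python
dice = tf.random.stateless_uniform(
    [4], seed=[1, 2], minval=1, maxval=7, dtype=tf.int32)
# Values drawn uniformly from {1, ..., 6}.
```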
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
    Arg<TF_I32OrI64Tensor, [{Minimum value (inclusive, scalar).}]>:$minval,
    Arg<TF_I32OrI64Tensor, [{Maximum value (exclusive, scalar).}]>:$maxval
  );

  let results = (outs
    Res<TF_I32OrI64Tensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
}

def TF_StatelessRandomUniformIntV2Op : TF_Op<"StatelessRandomUniformIntV2", [NoSideEffect]> {
  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
  }];

  let description = [{
The generated values follow a uniform distribution in the range `[minval, maxval)`.

The outputs are a deterministic function of `shape`, `key`, `counter`, `alg`, `minval` and `maxval`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg,
    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Minimum value (inclusive, scalar).}]>:$minval,
    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Maximum value (exclusive, scalar).}]>:$maxval
  );

  let results = (outs
    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<4>;
}

def TF_StatelessRandomUniformV2Op : TF_Op<"StatelessRandomUniformV2", [NoSideEffect]> {
  let summary = [{
Outputs deterministic pseudorandom values from a uniform distribution.
  }];

  let description = [{
The generated values follow a uniform distribution in the range `[0, 1)`. The
lower bound 0 is included in the range, while the upper bound 1 is excluded.

The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessTruncatedNormalOp : TF_Op<"StatelessTruncatedNormal", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom values from a truncated normal distribution.
  }];

  let description = [{
The generated values follow a normal distribution with mean 0 and standard
deviation 1, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.

The outputs are a deterministic function of `shape` and `seed`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessTruncatedNormalV2Op : TF_Op<"StatelessTruncatedNormalV2", [NoSideEffect]> {
  let summary = [{
Outputs deterministic pseudorandom values from a truncated normal distribution.
  }];

  let description = [{
The generated values follow a normal distribution with mean 0 and standard
deviation 1, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.

The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StaticRegexFullMatchOp : TF_Op<"StaticRegexFullMatch", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Check if the input matches the regex pattern.";

  let description = [{
The input is a string tensor of any shape. The pattern is the
regular expression to be matched with every element of the input tensor.
The boolean values (True or False) of the output tensor indicate
if the input matches the regex pattern provided.

The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax).
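
For example (a sketch via `tf.strings.regex_full_match`; with a constant
pattern it may be lowered to this op):

>>> tf.strings.regex_full_match(["TensorFlow", "flow"], "Tensor.*").numpy()
array([ True, False])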
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{A string tensor of the text to be processed.}]>:$input,

    StrAttr:$pattern
  );

  let results = (outs
    Res<TF_BoolTensor, [{A bool tensor with the same shape as `input`.}]>:$output
  );
}

def TF_StopGradientOp : TF_Op<"StopGradient", [NoSideEffect, TF_AllTypesMatch<["input", "output"]>]> {
  let summary = "Stops gradient computation.";

  let description = [{
When executed in a graph, this op outputs its input tensor as-is.

When building ops to compute gradients, this op prevents the contribution of
its inputs from being taken into account.  Normally, the gradient generator
adds ops to a graph to compute the derivatives of a specified 'loss' by
recursively finding the inputs that contributed to its computation.  If you
insert this op in the graph, its inputs are masked from the gradient
generator.  They are not taken into account for computing gradients.

This is useful any time you want to compute a value with TensorFlow but need
to pretend that the value was a constant. For example, the softmax function
for a vector x can be written as

```python

  def softmax(x):
    numerator = tf.exp(x)
    denominator = tf.reduce_sum(numerator)
    return numerator / denominator
```

This, however, is susceptible to overflow if the values in x are large. An
alternative, more stable way is to subtract the maximum of x from each of the
values.

```python

  def stable_softmax(x):
    z = x - tf.reduce_max(x)
    numerator = tf.exp(z)
    denominator = tf.reduce_sum(numerator)
    return numerator / denominator
```

However, when we backprop through the softmax to x, we don't want to backprop
through the `tf.reduce_max(x)` calculation (if the max values are not unique,
the gradient could flow to the wrong input); we want to treat it as a
constant. Therefore, we should write this out as

```python

  def stable_softmax(x):
    z = x - tf.stop_gradient(tf.reduce_max(x))
    numerator = tf.exp(z)
    denominator = tf.reduce_sum(numerator)
    return numerator / denominator
```

Some other examples include:

*  The *EM* algorithm where the *M-step* should not involve backpropagation
   through the output of the *E-step*.
*  Contrastive divergence training of Boltzmann machines where, when
   differentiating the energy function, the training must not backpropagate
   through the graph that generated the samples from the model.
*  Adversarial training, where no backprop should happen through the adversarial
   example generation process.
  }];

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_StridedSliceOp : TF_Op<"StridedSlice", [NoSideEffect]> {
  let summary = "Return a strided slice from `input`.";

  let description = [{
Note, most python users will want to use the Python `Tensor.__getitem__`
or `Variable.__getitem__` rather than this op directly.

The goal of this op is to produce a new tensor with a subset of
the elements from the `n` dimensional `input` tensor. The subset is chosen using
a sequence of `m` sparse range specifications encoded into the arguments
of this function. Note, in some cases
`m` could be equal to `n`, but this need not be the case. Each
range specification entry can be one of the following:

- An ellipsis (...). Ellipses are used to imply zero or more
  dimensions of full-dimension selection and are produced using
  `ellipsis_mask`. For example, `foo[...]` is the identity slice.

- A new axis. This is used to insert a new shape=1 dimension and is
  produced using `new_axis_mask`. For example, `foo[tf.newaxis, :, :]` where
  `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.


- A range `begin:end:stride`. This is used to specify how much to choose from
  a given dimension. `stride` can be any integer but 0.  `begin` is an integer
  which represents the index of the first value to select while `end` represents
  the index of the last value to select. The number of values selected in each
  dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
  `begin` and `end` can be negative where `-1` is the last element, `-2` is
  the second to last. `begin_mask` controls whether to replace the explicitly
  given `begin` with an implicit effective value of `0` if `stride > 0` and
  `-1` if `stride < 0`. `end_mask` is analogous but produces the number
  required to create the largest open interval. For example, given a shape
  `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
  not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
  and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
  first dimension of a tensor while dropping the last element (in the original
  order). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[3,2,1]`.

- A single index. This is used to keep only elements that have a given
  index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
  shape `(6,)` tensor. This is encoded in `begin` and `end` and
  `shrink_axis_mask`.

Each conceptual range specification is encoded in the op's arguments. This
encoding is best understood by considering a non-trivial example. In
particular,
`foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as

```
begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
end = [2, 4, x, x, -3, x]
strides = [1, 1, x, x, -1, 1]
begin_mask = 1<<4 | 1<<5 = 48
end_mask = 1<<5 = 32
ellipsis_mask = 1<<3 = 8
new_axis_mask = 1<<2 = 4
shrink_axis_mask = 1<<0 = 1
```

In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
the slice becomes (2, 1, 5, 5, 2, 5).
Let us walk step by step through each argument specification.

1.  The first argument in the example slice is turned into `begin = 1` and
`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
also set the appropriate bit in `shrink_axis_mask`.

2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
zero bits contributed.

3. None is a synonym for `tf.newaxis`. This means insert a new size-1
dimension in the final shape. Dummy values are contributed to begin,
end and stride, while the new_axis_mask bit is set.

4. `...` grabs the full ranges from as many dimensions as needed to
fully specify a slice for every dimension of the input shape.

5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
with a dimension that has shape `s` is converted to a positive index
`s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
is done internally so begin, end and strides receive x, -3, and -1.
The appropriate begin_mask bit is set to indicate the start range is the
full range (ignoring the x).

6. `:` indicates that the entire contents of the corresponding dimension
are selected. This is equivalent to `::` or `0::1`. begin, end, and strides
receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
`end_mask` are also set.

*Requirements*:
  `0 != strides[i] for i in [0, m)`
  `ellipsis_mask must be a power of two (only one ellipsis)`
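
The encoding can be checked from Python, where `Tensor.__getitem__` lowers
to this op:

```python
foo = tf.reshape(tf.range(5**6), [5, 5, 5, 5, 5, 5])
print(foo[1, 2:4, None, ..., :-3:-1, :].shape)  # (2, 1, 5, 5, 2, 5)
```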
  }];

  let arguments = (ins
    TF_Tensor:$input,
    Arg<TF_I32OrI64Tensor, [{`begin[k]` specifies the offset into the `k`th range specification.
The exact dimension this corresponds to will be determined by context.
Out-of-bounds values will be silently clamped. If the `k`th bit of
`begin_mask` is set then `begin[k]` is ignored and the full range of the
appropriate dimension is used instead. Negative values cause indexing
to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.}]>:$begin,
    Arg<TF_I32OrI64Tensor, [{`end[i]` is like `begin` with the exception that `end_mask` is
used to determine full ranges.}]>:$end,
    Arg<TF_I32OrI64Tensor, [{`strides[i]` specifies the increment in the `i`th specification
after extracting a given element. Negative indices will reverse
the original order. Out-of-range values are
clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`}]>:$strides,

    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
    DefaultValuedAttr<I64Attr, "0">:$end_mask,
    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasFolder = 1;

  let verifier = [{ return VerifyStridedSliceBase(*this); }];

  let extraClassDeclaration = [{
    // If sliced shape is able to be deduced, returns true, updates
    // `begin_indices`, `end_indices`, and `strides` with their canonical
    // values, respectively.
    bool GetSlicedBoundRanges(
      ::llvm::SmallVectorImpl<int64_t> *slice_begin,
      ::llvm::SmallVectorImpl<int64_t> *slice_end,
      ::llvm::SmallVectorImpl<int64_t> *slice_stride);
  }];
}

def TF_StridedSliceGradOp : TF_Op<"StridedSliceGrad", [NoSideEffect]> {
  let summary = "Returns the gradient of `StridedSlice`.";

  let description = [{
Since `StridedSlice` cuts out pieces of its `input` which is size
`shape`, its gradient will have the same shape (which is passed here
as `shape`). The gradient will be zero in any element that the slice
does not select.

Arguments are the same as for `StridedSlice` with the exception that
`dy` is the input gradient to be propagated and `shape` is the
shape of `StridedSlice`'s `input`.
  }];

  let arguments = (ins
    TF_I32OrI64Tensor:$shape,
    TF_I32OrI64Tensor:$begin,
    TF_I32OrI64Tensor:$end,
    TF_I32OrI64Tensor:$strides,
    TF_Tensor:$dy,

    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
    DefaultValuedAttr<I64Attr, "0">:$end_mask,
    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<4>;

  let verifier = [{ return Verify(*this); }];

  let extraClassDeclaration = [{
    // If sliced shape is able to be deduced, returns true, updates `shape`
    // with the final shape after performing StridedSlice, and updates
    // `begin_indices`, `end_indices`, and `strides` with their canonical
    // values, respectively.
    bool GetSlicedShapeAndBoundRanges(
      ::llvm::SmallVectorImpl<int64_t> *input_shape,
      ::llvm::SmallVectorImpl<int64_t> *slice_begin,
      ::llvm::SmallVectorImpl<int64_t> *slice_end,
      ::llvm::SmallVectorImpl<int64_t> *slice_stride);
  }];
}

def TF_StringJoinOp : TF_Op<"StringJoin", [NoSideEffect]> {
  let summary = [{
Joins the strings in the given list of string tensors into one tensor;
  }];

  let description = [{
with the given separator (default is an empty separator).

Examples:

>>> s = ["hello", "world", "tensorflow"]
>>> tf.strings.join(s, " ")
<tf.Tensor: shape=(), dtype=string, numpy=b'hello world tensorflow'>
  }];

  let arguments = (ins
    Arg<Variadic<TF_StrTensor>, [{A list of string tensors.  The tensors must all have the same shape,
or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
of non-scalar inputs.}]>:$inputs,

    DefaultValuedAttr<StrAttr, "">:$separator
  );

  let results = (outs
    TF_StrTensor:$output
  );

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_StringStripOp : TF_Op<"StringStrip", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Strip leading and trailing whitespaces from the Tensor.";

  let description = [{
Examples:

>>> tf.strings.strip(["\nTensorFlow", "     The python library    "]).numpy()
array([b'TensorFlow', b'The python library'], dtype=object)
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{A string `Tensor` of any shape.}]>:$input
  );

  let results = (outs
    Res<TF_StrTensor, [{A string `Tensor` of the same shape as the input.}]>:$output
  );
}

def TF_StringToHashBucketFastOp : TF_Op<"StringToHashBucketFast", [NoSideEffect]> {
  let summary = [{
Converts each string in the input Tensor to its hash modulo a number of buckets.
  }];

  let description = [{
The hash function is deterministic on the content of the string within the
process and will never change. However, it is not suitable for cryptography.
This function may be used when CPU time is scarce and inputs are trusted or
unimportant. There is a risk of adversaries constructing inputs that all hash
to the same bucket. To prevent this problem, use a strong hash function with
`tf.string_to_hash_bucket_strong`.

Examples:

>>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy()
array([0, 2, 2])
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{The strings to assign a hash bucket.}]>:$input,

    Confined<I64Attr, [IntMinValue<1>]>:$num_buckets
  );

  let results = (outs
    Res<TF_Int64Tensor, [{A Tensor of the same shape as the input `string_tensor`.}]>:$output
  );
}

def TF_SubOp : TF_Op<"Sub", [NoSideEffect, ResultsBroadcastableShape, TF_CwiseBinary, TF_SameOperandsAndResultElementTypeResolveRef]>,
               WithBroadcastableBinOpBuilder {
  let summary = "Returns x - y element-wise.";

  let description = [{
*NOTE*: `Subtract` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;

  let hasFolder = 1;
}

def TF_SumOp : TF_Op<"Sum", [NoSideEffect]> {
  let summary = "Computes the sum of elements across dimensions of a tensor.";

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
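
For example:

>>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
>>> tf.reduce_sum(x).numpy()
6
>>> tf.reduce_sum(x, 0).numpy()
array([2, 2, 2], dtype=int32)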
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.}]>:$reduction_indices,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let builders = [
    OpBuilder<(ins "Value":$input, "Value":$reduction_indices,
      "BoolAttr":$keep_dims)>
  ];

  let hasFolder = 1;
}

def TF_SvdOp : TF_Op<"Svd", [NoSideEffect]> {
  let summary = [{
Computes the singular value decompositions of one or more matrices.
  }];

  let description = [{
Computes the SVD of each inner matrix in `input` such that
`input[..., :, :] = u[..., :, :] * diag(s[..., :]) * transpose(v[..., :, :])`

```python
# a is a tensor containing a batch of matrices.
# s is a tensor of singular values for each matrix.
# u is the tensor containing the left singular vectors for each matrix.
# v is the tensor containing the right singular vectors for each matrix.
s, u, v = svd(a)
s, _, _ = svd(a, compute_uv=False)
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.}]>:$input,

    DefaultValuedAttr<BoolAttr, "true">:$compute_uv,
    DefaultValuedAttr<BoolAttr, "false">:$full_matrices
  );

  let results = (outs
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Singular values. Shape is `[..., P]`.}]>:$s,
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Left singular vectors. If `full_matrices` is `False` then shape is
`[..., M, P]`; if `full_matrices` is `True` then shape is
`[..., M, M]`. Undefined if `compute_uv` is `False`.}]>:$u,
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Right singular vectors. If `full_matrices` is `False` then shape is
`[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
Undefined if `compute_uv` is false.}]>:$v
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SymbolicGradientOp : TF_Op<"SymbolicGradient", [NoSideEffect]> {
  let summary = [{
Computes the gradient function for function f via backpropagation.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{a list of input tensors of size N + M;}]>:$input,

    SymbolRefAttr:$f
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{a list of output tensors of size N;}]>:$output
  );

  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
}

def TF_TPUCompilationResultOp : TF_Op<"TPUCompilationResult", []> {
  let summary = "Returns the result of a TPU compilation.";

  let description = [{
This operation returns the result of a TPU compilation as a serialized
CompilationResultProto, which holds a status and an error message if an error
occurred during compilation.
  }];

  let arguments = (ins);

  let results = (outs
    TF_StrTensor:$output
  );
}

def TF_TPUCompileSucceededAssertOp : TF_Op<"TPUCompileSucceededAssert", []> {
  let summary = "Asserts that compilation succeeded.";

  let description = [{
This op produces no output and closes the device during failure to ensure all
pending device interactions fail.

'compilation_status' is a serialized CompilationResultProto.
  }];

  let arguments = (ins
    TF_StrTensor:$compilation_status
  );

  let results = (outs);
}

def TF_TPUCopyWithLayoutOp : TF_Op<"TPUCopyWithLayout", [NoSideEffect]> {
  let summary = "Op that copies host tensor to device with specified layout.";

  let description = [{
For internal use only.
  }];

  let arguments = (ins
    TF_Tensor:$input,
    TF_Int64Tensor:$layout
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TPUEmbeddingActivationsOp : TF_Op<"TPUEmbeddingActivations", [NoSideEffect]> {
  let summary = "An op enabling differentiation of TPU Embeddings.";

  let description = [{
This op simply returns its first input, which is assumed to have been sliced
from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of
this op, and its first argument being a trainable Variable, enables automatic
differentiation of graphs containing embeddings via the TPU Embedding Python
libraries.
  }];

  let arguments = (ins
    Arg<TF_Float32Tensor, [{A trainable variable, enabling optimizers to find this op.}]>:$embedding_variable,
    Arg<TF_Float32Tensor, [{The embedding activations Tensor to return.}]>:$sliced_activations,

    Confined<I64Attr, [IntMinValue<0>]>:$table_id,
    Confined<I64Attr, [IntMinValue<0>]>:$lookup_id
  );

  let results = (outs
    TF_Float32Tensor:$output
  );
}

def TF_TPUExecuteOp : TF_Op<"TPUExecute", [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>]> {
  let summary = "Op that loads and executes a TPU program on a TPU device.";

  let description = [{
For the internal use of the distributed TPU compiler.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$args,
    TF_StrTensor:$key
  );

  let results = (outs
    Variadic<TF_Tensor>:$results
  );

  TF_DerivedOperandTypeListAttr Targs = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Tresults = TF_DerivedResultTypeListAttr<0>;
}

def TF_TPUExecuteAndUpdateVariablesOp : TF_Op<"TPUExecuteAndUpdateVariables", [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>]> {
  let summary = [{
Op that executes a program with optional in-place variable updates.
  }];

  let description = [{
It (optionally) reads device variables, loads and executes a TPU program on a
TPU device, and then (optionally) in-place updates variables using the program
outputs, as specified in attributes device_var_reads_indices (program input
indices from directly reading variables) and device_var_updates_indices (program
output indices used to update variables, -1 means no-update/read-only). Such
program outputs consumed by these variables will not appear in the op
output. For the internal use of the distributed TPU compiler.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$args,
    TF_StrTensor:$key,

    I64ArrayAttr:$device_var_reads_indices,
    I64ArrayAttr:$device_var_updates_indices
  );

  let results = (outs
    Variadic<TF_Tensor>:$results
  );

  TF_DerivedOperandTypeListAttr Targs = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Tresults = TF_DerivedResultTypeListAttr<0>;

  let verifier = [{ return Verify(*this); }];
}

def TF_TPUGetLayoutOp : TF_Op<"TPUGetLayoutOp", [NoSideEffect]> {
  let summary = [{
Op that retrieves the layout of an input or output determined by TPUCompile.
  }];

  let description = [{
For internal use only.
  }];

  let arguments = (ins
    TF_StrTensor:$cache_key,

    I64Attr:$index,
    BoolAttr:$is_output
  );

  let results = (outs
    TF_Int64Tensor:$layout
  );
}

def TF_TPUOrdinalSelectorOp : TF_Op<"TPUOrdinalSelector", []> {
  let summary = "A TPU core selector Op.";

  let description = [{
This Op produces a set of TPU cores (for warm-up) or a single TPU core
(for regular inference) to execute the TPU program on. The output is
consumed by TPUPartitionedCall.
  }];

  let arguments = (ins);

  let results = (outs
    Res<TF_Int32Tensor, [{A vector of 1 or more TPU cores.}]>:$device_ordinals
  );
}

def TF_TPUReplicateMetadataOp : TF_Op<"TPUReplicateMetadata", []> {
  let summary = [{
Metadata indicating how the TPU computation should be replicated.
  }];

  let description = [{
This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.
  }];

  let arguments = (ins
    Confined<I64Attr, [IntMinValue<0>]>:$num_replicas,
    DefaultValuedAttr<I64Attr, "1">:$num_cores_per_replica,
    DefaultValuedAttr<StrAttr, "">:$topology,
    DefaultValuedAttr<BoolAttr, "true">:$use_tpu,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$device_assignment,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$computation_shape,
    DefaultValuedAttr<StrArrayAttr, "{}">:$host_compute_core,
    DefaultValuedAttr<StrArrayAttr, "{}">:$padding_map,
    DefaultValuedAttr<StrAttr, "STEP_MARK_AT_ENTRY">:$step_marker_location,
    DefaultValuedAttr<BoolAttr, "false">:$allow_soft_placement,
    DefaultValuedAttr<BoolAttr, "false">:$use_spmd_for_xla_partitioning
  );

  let results = (outs);
}

def TF_TPUReplicatedInputOp : TF_Op<"TPUReplicatedInput", [NoSideEffect]> {
  let summary = "Connects N inputs to an N-way replicated TPU computation.";

  let description = [{
This operation holds a replicated input to a `tpu.replicate()` computation subgraph.
Each replicated input has the same shape and type as the output.

For example:
```
%a = "tf.opA"()
%b = "tf.opB"()
%replicated_input = "tf.TPUReplicatedInput"(%a, %b)
%computation = "tf.Computation"(%replicated_input)
```
The above computation has a replicated input of two replicas.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$inputs,

    DefaultValuedAttr<BoolAttr, "false">:$is_mirrored_variable,
    DefaultValuedAttr<I64Attr, "-1">:$index,
    DefaultValuedAttr<BoolAttr, "false">:$is_packed
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TPUReplicatedOutputOp : TF_Op<"TPUReplicatedOutput", [NoSideEffect]> {
  let summary = "Connects N outputs from an N-way replicated TPU computation.";

  let description = [{
This operation holds a replicated output from a `tpu.replicate()` computation subgraph.
Each replicated output has the same shape and type as the input.

For example:
```
%computation = "tf.Computation"()
%replicated_output:2 = "tf.TPUReplicatedOutput"(%computation)
```
The above computation has a replicated output of two replicas.
  }];

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    Variadic<TF_Tensor>:$outputs
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultSizeAttr num_replicas = TF_DerivedResultSizeAttr<0>;
}

def TF_TPUReshardVariablesOp : TF_Op<"TPUReshardVariables", []> {
  let summary = "Op that reshards on-device TPU variables to specified state.";

  let description = [{
Op that reshards on-device TPU variables to specified state. Internal use only.

The sharding state is represented as the key of the compilation that generated
the sharding/unsharding programs along with the main program. new_format_key
specifies the desired state, and format_state_var is the current state of the
variables.
  }];

  let arguments = (ins
    Arg<Variadic<TF_ResourceTensor>, "", [TF_VariableRead, TF_VariableWrite]>:$vars,
    TF_StrTensor:$new_format_key,
    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$format_state_var
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_TPURoundRobinOp : TF_Op<"TPURoundRobin", []> {
  let summary = "A load balancing op that round-robins among TPU cores.";

  let description = [{
This op round-robins between the integers in [0, NumTPUCoresVisiblePerHost]. It
is useful for interfacing with TensorFlow ops that take as input a TPU core on
which to execute computations, such as `TPUPartitionedCall`.
  }];

  let arguments = (ins);

  let results = (outs
    Res<TF_Int32Tensor, [{An integer in [0, NumTPUCoresVisiblePerHost].}]>:$device_ordinal
  );
}

def TF_TakeDatasetOp : TF_Op<"TakeDataset", [NoSideEffect]> {
  let summary = [{
Creates a dataset that contains `count` elements from the `input_dataset`.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_dataset,
    Arg<TF_Int64Tensor, [{A scalar representing the number of elements from the `input_dataset`
that should be taken. A value of `-1` indicates that all of `input_dataset`
is taken.}]>:$count,

    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    TF_VariantTensor:$handle
  );
}

def TF_TanOp : TF_Op<"Tan", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes tan of x element-wise.";

  let description = [{
Given an input tensor, this function computes tangent of every
  element in the tensor. Input range is `(-inf, inf)` and
  output range is `(-inf, inf)`. If input lies outside the boundary, `nan`
  is returned.

  ```python
  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
  tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]
  ```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TanhOp : TF_Op<"Tanh", [NoSideEffect, TF_LayoutAgnostic, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes hyperbolic tangent of `x` element-wise.";

  let description = [{
Given an input tensor, this function computes hyperbolic tangent of every
  element in the tensor. Input range is `[-inf, inf]` and
  output range is `[-1,1]`.

  >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")])
  >>> tf.math.tanh(x)
  <tf.Tensor: shape=(8,), dtype=float32, numpy=
  array([-1.        , -0.99990916, -0.46211717,  0.7615942 ,  0.8336547 ,
          0.9640276 ,  0.9950547 ,  1.        ], dtype=float32)>
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TanhGradOp : TF_Op<"TanhGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the gradient for the tanh of `x` wrt its input.";

  let description = [{
Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
is the corresponding input gradient.
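
A sketch of this identity through the public Python API (this op itself is
normally emitted by the gradient machinery, so `tf.GradientTape` stands in
for it here):

```python
import tensorflow as tf

x = tf.constant([0.0, 0.5, 1.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.tanh(x)
# With dy = 1, the autodiff gradient equals dy * (1 - y*y).
print(tape.gradient(y, x))  # [1.         0.7864477  0.41997433]
print(1.0 - y * y)          # same values
```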
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$y,
    TF_FpOrComplexTensor:$dy
  );

  let results = (outs
    TF_FpOrComplexTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TensorArrayCloseV3Op : TF_Op<"TensorArrayCloseV3", []> {
  let summary = "Delete the TensorArray from its resource container.";

  let description = [{
This enables the user to close and release the resource in the middle
of a step/run.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray (output of TensorArray or TensorArrayGrad).}], [TF_TensorArrayFree]>:$handle
  );

  let results = (outs);
}

def TF_TensorArrayConcatV3Op : TF_Op<"TensorArrayConcatV3", []> {
  let summary = "Concat the elements from the TensorArray into value `value`.";

  let description = [{
Takes `T` elements of shapes

  ```
  (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
  ```

and concatenates them into a Tensor of shape:

  ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```

All elements must have the same shape (excepting the first dimension).
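
A sketch via the Python `tf.TensorArray` wrapper, which lowers to these V3
ops (`infer_shape=False` is needed so elements may differ in their first
dimension):

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=2, infer_shape=False)
ta = ta.write(0, [[1., 2.]])            # n0 = 1, element shape (1, 2)
ta = ta.write(1, [[3., 4.], [5., 6.]])  # n1 = 2, element shape (2, 2)
print(ta.concat())  # shape (3, 2): elements stacked along dim 0
```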
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,

    DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape_except0
  );

  let results = (outs
    Res<TF_Tensor, [{All of the elements in the TensorArray, concatenated along the first
axis.}]>:$value,
    Res<TF_Int64Tensor, [{A vector of the row sizes of the original T elements in the
value output.  In the example above, this would be the values:
`(n0, n1, ..., n(T-1))`.}]>:$lengths
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorArrayGatherV3Op : TF_Op<"TensorArrayGatherV3", []> {
  let summary = [{
Gather specific elements from the TensorArray into output `value`.
  }];

  let description = [{
All elements selected by `indices` must have the same shape.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
    Arg<TF_Int32Tensor, [{The locations in the TensorArray from which to read tensor elements.}]>:$indices,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,

    DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape
  );

  let results = (outs
    Res<TF_Tensor, [{All of the elements in the TensorArray, concatenated along a new
axis (the new dimension 0).}]>:$value
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorArrayGradV3Op : TF_Op<"TensorArrayGradV3", []> {
  let summary = [{
Creates a TensorArray for storing the gradients of values in the given handle.
  }];

  let description = [{
If the given TensorArray gradient already exists, returns a reference to it.

Locks the size of the original TensorArray by disabling its dynamic size flag.

**A note about the input flow_in:**

The handle flow_in forces the execution of the gradient lookup to occur
only after certain other operations have occurred.  For example, when
the forward TensorArray is dynamically sized, writes to this TensorArray
may resize the object.  The gradient TensorArray is statically sized based
on the size of the forward TensorArray when this operation executes.
Furthermore, the size of the forward TensorArray is frozen by this call.
As a result, the flow is used to ensure that the call to generate the gradient
TensorArray only happens after all writes are executed.

In the case of dynamically sized TensorArrays, gradient computation should
only be performed on read operations that have themselves been chained via
flow to occur only after all writes have executed. That way the final size
of the forward TensorArray is known when this operation is called.

**A note about the source attribute:**

TensorArray gradient calls use an accumulator TensorArray object.  If
multiple gradients are calculated and run in the same session, the multiple
gradient nodes may accidentally flow through the same accumulator TensorArray.
This double counts and generally breaks the TensorArray gradient flow.

The solution is to identify which gradient call this particular
TensorArray gradient is being called in.  This is performed by identifying
a unique string (e.g. "gradients", "gradients_1", ...) from the input
gradient Tensor's name.  This string is used as a suffix when creating
the TensorArray gradient object here (the attribute `source`).

The attribute `source` is added as a suffix to the forward TensorArray's
name when performing the creation / lookup, so that each separate gradient
calculation gets its own TensorArray accumulator.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to the forward TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,

    StrAttr:$source
  );

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_TensorArrayAlloc]>:$grad_handle,
    TF_Float32Tensor:$flow_out
  );
}

def TF_TensorArrayReadV3Op : TF_Op<"TensorArrayReadV3", []> {
  let summary = "Read an element from the TensorArray into output `value`.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
    TF_Int32Tensor:$index,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
  );

  let results = (outs
    Res<TF_Tensor, [{The tensor that is read from the TensorArray.}]>:$value
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorArrayScatterV3Op : TF_Op<"TensorArrayScatterV3", []> {
  let summary = [{
Scatter the data from the input value into specific TensorArray elements.
  }];

  let description = [{
`indices` must be a vector, and its length must match the first dim of `value`.
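
A sketch via the Python `tf.TensorArray` wrapper:

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=3)
# Row k of `value` is written to position indices[k].
ta = ta.scatter(indices=[2, 0], value=[[1., 1.], [2., 2.]])
print(ta.read(0))  # [2. 2.]
print(ta.read(2))  # [1. 1.]
```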
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
    Arg<TF_Int32Tensor, [{The locations at which to write the tensor elements.}]>:$indices,
    Arg<TF_Tensor, [{The concatenated tensor to write to the TensorArray.}]>:$value,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
  );

  let results = (outs
    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
}

def TF_TensorArraySizeV3Op : TF_Op<"TensorArraySizeV3", []> {
  let summary = "Get the current size of the TensorArray.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray (output of TensorArray or TensorArrayGrad).}], [TF_TensorArrayRead]>:$handle,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
  );

  let results = (outs
    Res<TF_Int32Tensor, [{The current size of the TensorArray.}]>:$size
  );
}

def TF_TensorArraySplitV3Op : TF_Op<"TensorArraySplitV3", []> {
  let summary = [{
Split the data from the input value into TensorArray elements.
  }];

  let description = [{
Assuming that `lengths` takes on values

  ```(n0, n1, ..., n(T-1))```

and that `value` has shape

  ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,

this splits `value` into a TensorArray with T tensors.

TensorArray index t will be the subtensor of `value` with starting position

  ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```

and having size

  ```nt x d0 x d1 x ...```
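
A sketch via the Python `tf.TensorArray` wrapper (`infer_shape=False` since
the resulting elements have different sizes):

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=2, infer_shape=False)
ta = ta.split(value=[1., 2., 3., 4., 5.], lengths=[2, 3])
print(ta.read(0))  # [1. 2.]     -> n0 = 2
print(ta.read(1))  # [3. 4. 5.]  -> n1 = 3
```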
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
    Arg<TF_Tensor, [{The concatenated tensor to write to the TensorArray.}]>:$value,
    Arg<TF_Int64Tensor, [{The vector of lengths, how to split the rows of value into the
TensorArray.}]>:$lengths,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
  );

  let results = (outs
    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_TensorArrayV3Op : TF_Op<"TensorArrayV3", []> {
  let summary = "An array of Tensors of given size.";

  let description = [{
Write data via Write and read via Read or Pack.
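
A sketch via the Python `tf.TensorArray` wrapper, which manages the handle
and flow values listed below:

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=3)
ta = ta.write(0, 10.).write(1, 20.).write(2, 30.)
print(ta.read(1))  # 20.0
print(ta.stack())  # [10. 20. 30.]  (Pack)
```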
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{The size of the array.}]>:$size,

    TypeAttr:$dtype,
    DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape,
    DefaultValuedAttr<BoolAttr, "false">:$dynamic_size,
    DefaultValuedAttr<BoolAttr, "true">:$clear_after_read,
    DefaultValuedAttr<BoolAttr, "false">:$identical_element_shapes,
    DefaultValuedAttr<StrAttr, "">:$tensor_array_name
  );

  let results = (outs
    Res<TF_ResourceTensor, [{The handle to the TensorArray.}], [TF_TensorArrayAlloc]>:$handle,
    Res<TF_Float32Tensor, [{A scalar used to control gradient flow.}]>:$flow
  );
}

def TF_TensorArrayWriteV3Op : TF_Op<"TensorArrayWriteV3", []> {
  let summary = "Push an element onto the tensor_array.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
    Arg<TF_Int32Tensor, [{The position to write to inside the TensorArray.}]>:$index,
    Arg<TF_Tensor, [{The tensor to write to the TensorArray.}]>:$value,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
  );

  let results = (outs
    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
}

def TF_TensorListConcatV2Op : TF_Op<"TensorListConcatV2", [NoSideEffect]> {
  let summary = "Concats all tensors in the list along the 0th dimension.";

  let description = [{
Requires that all tensors have the same shape except the first dimension.

input_handle: The input list.
element_shape: The shape of the uninitialized elements in the list. If the first
  dimension is not -1, it is assumed that all list elements have the same
  leading dim.
leading_dims: The list of leading dims of uninitialized list elements. Used if
  the leading dim of input_handle.element_shape or the element_shape input arg
  is not already set.
tensor: The concatenated result.
lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_I32OrI64Tensor:$element_shape,
    TF_Int64Tensor:$leading_dims
  );

  let results = (outs
    TF_Tensor:$tensor,
    TF_Int64Tensor:$lengths
  );

  TF_DerivedOperandTypeAttr shape_type = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorListElementShapeOp : TF_Op<"TensorListElementShape", [NoSideEffect]> {
  let summary = "The shape of the elements of the given list, as a tensor.";

  let description = [{
input_handle: the list
element_shape: the shape of elements of the list
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle
  );

  let results = (outs
    TF_I32OrI64Tensor:$element_shape
  );

  TF_DerivedResultTypeAttr shape_type = TF_DerivedResultTypeAttr<0>;

  let hasFolder = 1;
}

def TF_TensorListFromTensorOp : TF_Op<"TensorListFromTensor", [NoSideEffect]> {
  let summary = [{
Creates a TensorList which, when stacked, has the value of `tensor`.
  }];

  let description = [{
Each tensor in the result list corresponds to one row of the input tensor.

tensor: The input tensor.
output_handle: The list.
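
A sketch via the `tf.raw_ops` bindings, whose keyword names mirror this op's
signature (eager execution assumed):

```python
import tensorflow as tf

t = tf.constant([[1, 2], [3, 4], [5, 6]])
handle = tf.raw_ops.TensorListFromTensor(tensor=t, element_shape=[2])
print(tf.raw_ops.TensorListLength(input_handle=handle))  # 3
print(tf.raw_ops.TensorListGetItem(input_handle=handle, index=1,
                                   element_shape=[2],
                                   element_dtype=tf.int32))  # [3 4]
```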
  }];

  let arguments = (ins
    TF_Tensor:$tensor,
    TF_I32OrI64Tensor:$element_shape
  );

  let results = (outs
    TF_VariantTensor:$output_handle
  );

  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr shape_type = TF_DerivedOperandTypeAttr<1>;
}

def TF_TensorListGatherOp : TF_Op<"TensorListGather", [NoSideEffect]> {
  let summary = "Creates a Tensor by indexing into the TensorList.";

  let description = [{
Each row in the produced Tensor corresponds to the element in the TensorList
specified by the given index (see `tf.gather`).

input_handle: The input tensor list.
indices: The indices used to index into the list.
values: The tensor.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$indices,
    TF_Int32Tensor:$element_shape
  );

  let results = (outs
    TF_Tensor:$values
  );

  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorListGetItemOp : TF_Op<"TensorListGetItem", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$index,
    TF_Int32Tensor:$element_shape
  );

  let results = (outs
    TF_Tensor:$item
  );

  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorListLengthOp : TF_Op<"TensorListLength", [NoSideEffect]> {
  let summary = "Returns the number of tensors in the input tensor list.";

  let description = [{
input_handle: the input list
length: the number of tensors in the list
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle
  );

  let results = (outs
    TF_Int32Tensor:$length
  );
}

def TF_TensorListPopBackOp : TF_Op<"TensorListPopBack", [NoSideEffect]> {
  let summary = [{
Returns the last element of the input list as well as a list with all but that element.
  }];

  let description = [{
Fails if the list is empty.

input_handle: the input list
tensor: the withdrawn last element of the list
element_dtype: the type of elements in the list
element_shape: the shape of the output tensor
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$element_shape
  );

  let results = (outs
    TF_VariantTensor:$output_handle,
    TF_Tensor:$tensor
  );

  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<1>;
}

def TF_TensorListPushBackOp : TF_Op<"TensorListPushBack", [NoSideEffect]> {
  let summary = [{
Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`.
  }];

  let description = [{
tensor: The tensor to put on the list.
input_handle: The old list.
output_handle: A list with the elements of the old list followed by tensor.
element_dtype: the type of elements in the list.
element_shape: a shape compatible with that of elements in the list.
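
A sketch pairing this op with `TensorListPopBack` via the `tf.raw_ops`
bindings (a scalar `element_shape` of -1 is assumed to mean "unknown shape";
eager execution assumed):

```python
import tensorflow as tf

l = tf.raw_ops.EmptyTensorList(element_shape=-1, max_num_elements=-1,
                               element_dtype=tf.int32)
l = tf.raw_ops.TensorListPushBack(input_handle=l, tensor=7)
l, last = tf.raw_ops.TensorListPopBack(input_handle=l, element_shape=-1,
                                       element_dtype=tf.int32)
print(last)  # 7
```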
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Tensor:$tensor
  );

  let results = (outs
    TF_VariantTensor:$output_handle
  );

  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_TensorListResizeOp : TF_Op<"TensorListResize", [NoSideEffect]> {
  let summary = "Resizes the list.";

  let description = [{
input_handle: the input list
size: size of the output list
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$size
  );

  let results = (outs
    TF_VariantTensor:$output_handle
  );
}

def TF_TensorListScatterIntoExistingListOp : TF_Op<"TensorListScatterIntoExistingList", [NoSideEffect]> {
  let summary = "Scatters tensor at indices in an input list.";

  let description = [{
Each member of the TensorList corresponds to one row of the input tensor,
specified by the given index (see `tf.gather`).

input_handle: The list to scatter into.
tensor: The input tensor.
indices: The indices used to index into the list.
output_handle: The TensorList.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Tensor:$tensor,
    TF_Int32Tensor:$indices
  );

  let results = (outs
    TF_VariantTensor:$output_handle
  );

  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_TensorListSetItemOp : TF_Op<"TensorListSetItem", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$index,
    TF_Tensor:$item
  );

  let results = (outs
    TF_VariantTensor:$output_handle
  );

  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<2>;
}

def TF_TensorListStackOp : TF_Op<"TensorListStack", [NoSideEffect]> {
  let summary = "Stacks all tensors in the list.";

  let description = [{
Requires that all tensors have the same shape.

input_handle: the input list
tensor: the gathered result
num_elements: optional. If not -1, the number of elements in the list.
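
A sketch via the `tf.raw_ops` bindings (eager execution assumed):

```python
import tensorflow as tf

l = tf.raw_ops.TensorListFromTensor(tensor=[[1, 2], [3, 4]],
                                    element_shape=[2])
t = tf.raw_ops.TensorListStack(input_handle=l, element_shape=[2],
                               element_dtype=tf.int32)
print(t)  # [[1 2]
          #  [3 4]]
```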
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$element_shape,

    DefaultValuedAttr<I64Attr, "-1">:$num_elements
  );

  let results = (outs
    TF_Tensor:$tensor
  );

  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_TensorScatterAddOp : TF_Op<"TensorScatterAdd", [NoSideEffect]> {
  let summary = [{
Adds sparse `updates` to an existing tensor according to `indices`.
  }];

  let description = [{
This operation creates a new tensor by adding sparse `updates` to the passed
in `tensor`.
This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the updates
are added onto an existing tensor (as opposed to a variable). If the memory
for the existing tensor cannot be re-used, a copy is made and updated.

`indices` is an integer tensor containing indices into a new tensor of shape
`tensor.shape`.  The last dimension of `indices` can be at most the rank of
`tensor.shape`:

    indices.shape[-1] <= tensor.shape.rank

The last dimension of `indices` corresponds to indices into elements
(if `indices.shape[-1] = tensor.shape.rank`) or slices
(if `indices.shape[-1] < tensor.shape.rank`) along dimension
`indices.shape[-1]` of `tensor.shape`.  `updates` is a tensor with shape

    indices.shape[:-1] + tensor.shape[indices.shape[-1]:]

The simplest form of tensor_scatter_add is to add individual elements to a
tensor by index. For example, say we want to add 4 elements in a rank-1
tensor with 8 elements.

In Python, this scatter add operation would look like this:

```python
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    tensor = tf.ones([8], dtype=tf.int32)
    updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
    print(updated)
```

The resulting tensor would look like this:

    [1, 12, 1, 11, 10, 1, 1, 13]

We can also insert entire slices of a higher rank tensor all at once. For
example, suppose we want to insert two slices in the first dimension of a
rank-3 tensor with two matrices of new values.

In Python, this scatter add operation would look like this:

```python
    indices = tf.constant([[0], [2]])
    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                            [7, 7, 7, 7], [8, 8, 8, 8]],
                           [[5, 5, 5, 5], [6, 6, 6, 6],
                            [7, 7, 7, 7], [8, 8, 8, 8]]])
    tensor = tf.ones([4, 4, 4], dtype=tf.int32)
    updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
    print(updated)
```

The resulting tensor would look like this:

    [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
     [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]

Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, the index is ignored.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor copied from tensor and updates added according to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;

  let builders = [
    OpBuilder<(ins "Value":$tensor, "Value":$indices, "Value":$updates),
    [{build($_builder, $_state, tensor.getType(), tensor, indices, updates);}]>
  ];
}

def TF_TensorScatterMaxOp : TF_Op<"TensorScatterMax", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    Arg<TF_Tensor, [{Tensor to update.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor copied from tensor whose values are element-wise maximum between tensor and updates according to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_TensorScatterMinOp : TF_Op<"TensorScatterMin", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    Arg<TF_Tensor, [{Tensor to update.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor copied from tensor whose values are element-wise minimum between tensor and updates according to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_TensorScatterSubOp : TF_Op<"TensorScatterSub", [NoSideEffect]> {
  let summary = [{
Subtracts sparse `updates` from an existing tensor according to `indices`.
  }];

  let description = [{
This operation creates a new tensor by subtracting sparse `updates` from the
passed in `tensor`.
This operation is very similar to `tf.scatter_nd_sub`, except that the updates
are subtracted from an existing tensor (as opposed to a variable). If the memory
for the existing tensor cannot be re-used, a copy is made and updated.

`indices` is an integer tensor containing indices into a new tensor of shape
`shape`.  The last dimension of `indices` can be at most the rank of `shape`:

    indices.shape[-1] <= shape.rank

The last dimension of `indices` corresponds to indices into elements
(if `indices.shape[-1] = shape.rank`) or slices
(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
`shape`.  `updates` is a tensor with shape

    indices.shape[:-1] + shape[indices.shape[-1]:]

The simplest form of tensor_scatter_sub is to subtract individual elements
from a tensor by index. For example, say we want to subtract 4 scattered
elements from a rank-1 tensor with 8 elements.

In Python, this scatter subtract operation would look like this:

```python
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    tensor = tf.ones([8], dtype=tf.int32)
    updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
    print(updated)
```

The resulting tensor would look like this:

    [1, -10, 1, -9, -8, 1, 1, -11]

We can also insert entire slices of a higher rank tensor all at once. For
example, suppose we want to insert two slices in the first dimension of a
rank-3 tensor with two matrices of new values.

In Python, this scatter subtract operation would look like this:

```python
    indices = tf.constant([[0], [2]])
    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                            [7, 7, 7, 7], [8, 8, 8, 8]],
                           [[5, 5, 5, 5], [6, 6, 6, 6],
                            [7, 7, 7, 7], [8, 8, 8, 8]]])
    tensor = tf.ones([4, 4, 4], dtype=tf.int32)
    updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
    print(updated)
```

The resulting tensor would look like this:

    [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
     [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]

Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, the index is ignored.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor copied from tensor and updates subtracted according to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_TensorScatterUpdateOp : TF_Op<"TensorScatterUpdate", [NoSideEffect]> {
  let summary = [{
Scatter `updates` into an existing tensor according to `indices`.
  }];

  let description = [{
This operation creates a new tensor by applying sparse `updates` to the passed
in `tensor`.
This operation is very similar to `tf.scatter_nd`, except that the updates are
scattered onto an existing tensor (as opposed to a zero-tensor). If the memory
for the existing tensor cannot be re-used, a copy is made and updated.

If `indices` contains duplicates, then we pick the last update for the index.

If an out of bound index is found on CPU, an error is returned.

**WARNING**: There are some GPU specific semantics for this operation.
- If an out of bound index is found, the index is ignored.
- The order in which updates are applied is nondeterministic, so the output
will be nondeterministic if `indices` contains duplicates.

`indices` is an integer tensor containing indices into a new tensor of shape
`shape`.

* `indices` must have at least 2 axes: `(num_updates, index_depth)`.
* The last axis of `indices` is how deep to index into `tensor`, so this index
  depth must not exceed the rank of `tensor`: `indices.shape[-1] <= tensor.ndim`

If `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements.
If `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input
`tensor`.

Each `update` has a rank of `tensor.rank - indices.shape[-1]`.
The overall shape of `updates` is:

```
indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
```

For usage examples see the python [tf.tensor_scatter_nd_update](
https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function.
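
A minimal example through that Python wrapper:

```python
import tensorflow as tf

tensor = tf.zeros([8], dtype=tf.int32)
indices = tf.constant([[4], [3], [1], [7]])  # index_depth = 1
updates = tf.constant([9, 10, 11, 12])       # one scalar per index
print(tf.tensor_scatter_nd_update(tensor, indices, updates))
# [ 0 11  0 10  9  0  0 12]
```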
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor with the given shape and updates applied according
to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{ return Verify(*this); }];

  let builders = [
    OpBuilder<(ins "Value":$tensor, "Value":$indices, "Value":$updates),
    [{build($_builder, $_state, tensor.getType(), tensor, indices, updates);}]>
  ];
}

def TF_TensorSliceDatasetOp : TF_Op<"TensorSliceDataset", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Creates a dataset that emits each dim-0 slice of `components` once.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$components,

    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    TF_VariantTensor:$handle
  );

  TF_DerivedOperandTypeListAttr Toutput_types = TF_DerivedOperandTypeListAttr<0>;
}

def TF_TensorStridedSliceUpdateOp : TF_Op<"TensorStridedSliceUpdate", [NoSideEffect]> {
  let summary = "Assign `value` to the sliced l-value reference of `input`.";

  let description = [{
The values of `value` are assigned to the positions in the tensor `input` that
are selected by the slice parameters. The slice parameters `begin`, `end`,
`strides`, etc. work exactly as in `StridedSlice`.

NOTE: this op currently does not support broadcasting, so `value`'s shape
must be exactly the shape produced by the slice of `input`.
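
A sketch via the `tf.raw_ops` bindings (eager execution assumed); `value` has
shape `(2,)`, exactly the shape of the slice `input[1:5:2]`:

```python
import tensorflow as tf

x = tf.zeros([6], dtype=tf.int32)
y = tf.raw_ops.TensorStridedSliceUpdate(input=x, begin=[1], end=[5],
                                        strides=[2], value=[7, 8])
print(y)  # [0 7 0 8 0 0]
```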
  }];

  let arguments = (ins
    TF_Tensor:$input,
    TF_I32OrI64Tensor:$begin,
    TF_I32OrI64Tensor:$end,
    TF_I32OrI64Tensor:$strides,
    TF_Tensor:$value,

    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
    DefaultValuedAttr<I64Attr, "0">:$end_mask,
    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TileOp : TF_Op<"Tile", [NoSideEffect]> {
  let summary = "Constructs a tensor by tiling a given tensor.";

  let description = [{
This operation creates a new tensor by replicating `input` `multiples` times.
The output tensor's `i`'th dimension has `input.dims(i) * multiples[i]` elements,
and the values of `input` are replicated `multiples[i]` times along the `i`'th
dimension. For example, tiling `[a b c d]` by `[2]` produces
`[a b c d a b c d]`.

>>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)
>>> b = tf.constant([1,2], tf.int32)
>>> tf.tile(a, b)
<tf.Tensor: shape=(2, 6), dtype=int32, numpy=
array([[1, 2, 3, 1, 2, 3],
       [4, 5, 6, 4, 5, 6]], dtype=int32)>
>>> c = tf.constant([2,1], tf.int32)
>>> tf.tile(a, c)
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[1, 2, 3],
       [4, 5, 6],
       [1, 2, 3],
       [4, 5, 6]], dtype=int32)>
>>> d = tf.constant([2,2], tf.int32)
>>> tf.tile(a, d)
<tf.Tensor: shape=(4, 6), dtype=int32, numpy=
array([[1, 2, 3, 1, 2, 3],
       [4, 5, 6, 4, 5, 6],
       [1, 2, 3, 1, 2, 3],
       [4, 5, 6, 4, 5, 6]], dtype=int32)>
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{1-D or higher.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{1-D. Length must be the same as the number of dimensions in `input`.}]>:$multiples
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tmultiples = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{ return Verify(*this); }];

  let hasFolder = 1;
}

def TF_TimestampOp : TF_Op<"Timestamp", []> {
  let summary = "Provides the time since epoch in seconds.";

  let description = [{
Returns the timestamp as a `float64` for seconds since the Unix epoch.

Note: the timestamp is computed when the op is executed, not when it is added
to the graph.
  }];

  let arguments = (ins);

  let results = (outs
    TF_Float64Tensor:$ts
  );
}

def TF_TopKUniqueOp : TF_Op<"TopKUnique", [NoSideEffect]> {
  let summary = "Returns the TopK unique values in the array in sorted order.";

  let description = [{
The running time is proportional to the product of K and the input
size. Sorting the whole array is more efficient for sufficiently large
values of K. The median-of-medians algorithm is probably faster, but
difficult to implement efficiently in XLA. If there are fewer than K
unique numbers (not NaNs), the results are padded with negative
infinity. NaNs are never returned. Subnormal numbers are flushed to
zero. If an element appears at multiple indices, the highest index is
returned. If a TopK element never appears in the input due to padding
values, the indices are padded with negative one. If a padding value
appears in the input and padding is needed, the highest index of the
padding value will be returned. The semantics are not the same as
kth_order_statistic.
  }];

  let arguments = (ins
    TF_Float32Tensor:$input,

    I64Attr:$k
  );

  let results = (outs
    TF_Float32Tensor:$topk,
    TF_Int32Tensor:$topk_indices
  );
}

def TF_TopKV2Op : TF_Op<"TopKV2", [NoSideEffect]> {
  let summary = [{
Finds values and indices of the `k` largest elements for the last dimension.
  }];

  let description = [{
If the input is a vector (rank-1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors.  Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.

For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension).  Thus,

    values.shape = indices.shape = input.shape[:-1] + [k]

If two elements are equal, the lower-index element appears first.
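
For example, through the Python wrapper `tf.math.top_k`:

```python
import tensorflow as tf

values, indices = tf.math.top_k([1., 4., 2., 9.], k=2)
print(values)   # [9. 4.]
print(indices)  # [3 1]
```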
  }];

  let arguments = (ins
    Arg<TF_IntOrFpTensor, [{1-D or higher with last dimension at least `k`.}]>:$input,
    Arg<TF_Int32Tensor, [{0-D.  Number of top elements to look for along the last dimension (along each
row for matrices).}]>:$k,

    DefaultValuedAttr<BoolAttr, "true">:$sorted
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{The `k` largest elements along each last dimensional slice.}]>:$values,
    Res<TF_Int32Tensor, [{The indices of `values` within the last dimension of `input`.}]>:$indices
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{ return Verify(*this); }];
}

def TF_TopKWithUniqueOp : TF_Op<"TopKWithUnique", [NoSideEffect]> {
  let summary = "Returns the TopK values in the array in sorted order.";

  let description = [{
This is a combination of MakeUnique and TopKUnique. The returned top-K will
have its lower bits replaced by iota, thus it will be close to the original
value but not exactly the same. The running time is proportional to the product
of K and the input size. NaNs are never returned. Subnormal numbers are flushed
to zero.
  }];

  let arguments = (ins
    TF_Float32Tensor:$input,

    I64Attr:$k
  );

  let results = (outs
    TF_Float32Tensor:$topk,
    TF_Int32Tensor:$topk_indices
  );
}

def TF_TransposeOp : TF_Op<"Transpose", [NoSideEffect]> {
  let summary = "Shuffle dimensions of x according to a permutation.";

  let description = [{
The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
  `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
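
For example, through the Python wrapper `tf.transpose`:

```python
import tensorflow as tf

x = tf.reshape(tf.range(6), [2, 3])
y = tf.transpose(x, perm=[1, 0])
print(y.shape)  # (3, 2) -- y.shape[i] == x.shape[perm[i]]
```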
  }];

  let arguments = (ins
    TF_Tensor:$x,
    TF_I32OrI64Tensor:$perm
  );

  let results = (outs
    TF_Tensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tperm = TF_DerivedOperandTypeAttr<1>;

  let builders = [
    OpBuilder<(ins "Value":$x, "Value":$perm)>
  ];

  let verifier = [{
    return Verify(*this);
  }];

  let hasFolder = 1;
}

def TF_TridiagonalSolveOp : TF_Op<"TridiagonalSolve", [NoSideEffect]> {
  let summary = "Solves tridiagonal systems of equations.";

  let description = [{
Solves tridiagonal systems of equations.
  Supports batch dimensions and multiple right-hand sides per each left-hand
  side.
  On CPU, the solution is computed via Gaussian elimination with or without
  partial pivoting, depending on the `partial_pivoting` attribute. On GPU,
  Nvidia's cuSPARSE library is used:
  https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
  Partial pivoting is not yet supported by XLA backends.
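
A sketch via the Python wrapper `tf.linalg.tridiagonal_solve`, whose default
`diagonals_format='compact'` matches the `[..., 3, M]` packing documented on
the `diagonals` argument:

```python
import tensorflow as tf

# A = [[2, 1, 0],
#      [1, 2, 1],
#      [0, 1, 2]]
diagonals = tf.constant([[1., 1., 0.],   # superdiagonal (last entry ignored)
                         [2., 2., 2.],   # main diagonal
                         [0., 1., 1.]])  # subdiagonal (first entry ignored)
rhs = tf.constant([[1.], [2.], [3.]])    # M = 3, K = 1
print(tf.linalg.tridiagonal_solve(diagonals, rhs))
# [[0.5] [0.] [1.5]]
```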
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the
tridiagonal matrices with three rows being the superdiagonal, diagonals, and
subdiagonals, in order. The last element of the superdiagonal and the first
element of the subdiagonal are ignored.}]>:$diagonals,
    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., M, K]`, representing K right-hand sides per each
left-hand side.}]>:$rhs,

    DefaultValuedAttr<BoolAttr, "true">:$partial_pivoting,
    DefaultValuedAttr<BoolAttr, "false">:$perturb_singular
  );

  let results = (outs
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., M, K]` containing the solutions.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TruncateDivOp : TF_Op<"TruncateDiv", [NoSideEffect, ResultsBroadcastableShape]>,
                       WithBroadcastableBinOpBuilder {
  let summary = "Returns x / y element-wise for integer types.";

  let description = [{
Truncation designates that negative numbers will round fractional quantities
toward zero. I.e. -7 / 5 = -1. This matches C semantics but is different
from Python semantics. See `FloorDiv` for a division function that matches
Python semantics.

*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
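
For example, through the Python wrapper `tf.truncatediv`:

```python
import tensorflow as tf

x = tf.constant([-7, 7])
y = tf.constant([5, 5])
print(tf.truncatediv(x, y))  # [-1  1]  (rounds toward zero)
print(x // y)                # [-2  1]  (Python floor semantics)
```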
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_TruncateModOp : TF_Op<"TruncateMod", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                       WithBroadcastableBinOpBuilder {
  let summary = [{
Returns element-wise remainder of division. This emulates C semantics in that
  }];

  let description = [{
the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
y + truncate_mod(x, y) = x`.

*NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
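
A short check of that identity through the Python wrappers:

```python
import tensorflow as tf

x = tf.constant([-7, 7])
y = tf.constant([5, 5])
q = tf.truncatediv(x, y)  # [-1  1]
r = tf.truncatemod(x, y)  # [-2  2]
print(q * y + r)          # [-7  7] == x
```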
  }];

  let arguments = (ins
    TF_FpOrI32OrI64Tensor:$x,
    TF_FpOrI32OrI64Tensor:$y
  );

  let results = (outs
    TF_FpOrI32OrI64Tensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TruncatedNormalOp : TF_Op<"TruncatedNormal", [TF_CannotDuplicate]> {
  let summary = "Outputs random values from a truncated normal distribution.";

  let description = [{
The generated values follow a normal distribution with mean 0 and standard
deviation 1, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
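
For example, through the Python wrapper (which also exposes the seed
attributes):

```python
import tensorflow as tf

samples = tf.random.truncated_normal([4], seed=1)
print(samples)  # every entry lies within (-2, 2)
```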
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,

    DefaultValuedAttr<I64Attr, "0">:$seed,
    DefaultValuedAttr<I64Attr, "0">:$seed2
  );

  let results = (outs
    Res<TF_FloatTensor, [{A tensor of the specified shape filled with random truncated normal
values.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_UncompressElementOp : TF_Op<"UncompressElement", [NoSideEffect]> {
  let summary = "Uncompresses a compressed dataset element.";

  let arguments = (ins
    TF_VariantTensor:$compressed
  );

  let results = (outs
    Variadic<TF_Tensor>:$components
  );

  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
}

def TF_UniqueOp : TF_Op<"Unique", [NoSideEffect]> {
  let summary = "Finds unique elements in a 1-D tensor.";

  let description = [{
This operation returns a tensor `y` containing all of the unique elements of `x`
sorted in the same order that they occur in `x`; `x` does not need to be sorted.
This operation also returns a tensor `idx` the same size as `x` that contains
the index of each value of `x` in the unique output `y`. In other words:

`y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]`

Examples:

```
# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx = unique(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
```

```
# tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]
y, idx = unique(x)
y ==> [4, 5, 1, 2, 3]
idx ==> [0, 1, 2, 3, 4, 4, 0, 1]
```
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{1-D.}]>:$x
  );

  let results = (outs
    Res<TF_Tensor, [{1-D.}]>:$y,
    Res<TF_I32OrI64Tensor, [{1-D.}]>:$idx
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr out_idx = TF_DerivedResultTypeAttr<1>;
}

def TF_UnpackOp : TF_Op<"Unpack", [NoSideEffect]> {
  let summary = [{
Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
  }];

  let description = [{
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
For example, given a tensor of shape `(A, B, C, D)`;

If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
  and each tensor in `output` will have shape `(B, C, D)`. (Note that the
  dimension unpacked along is gone, unlike `split`).

If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
  and each tensor in `output` will have shape `(A, C, D)`.
Etc.

This is the opposite of `pack`.
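
For example, through the Python wrapper `tf.unstack`:

```python
import tensorflow as tf

value = tf.reshape(tf.range(6), [2, 3])  # shape (2, 3)
a, b = tf.unstack(value, axis=0)         # two tensors of shape (3,)
print(a)                                 # [0 1 2]
cols = tf.unstack(value, axis=1)         # three tensors of shape (2,)
print(len(cols))                         # 3
```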
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{1-D or higher, with `axis` dimension size equal to `num`.}]>:$value,

    DefaultValuedAttr<I64Attr, "0">:$axis
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{The list of tensors unpacked from `value`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultSizeAttr num = TF_DerivedResultSizeAttr<0>;

  let verifier = [{ return Verify(*this); }];

  let hasCanonicalizer = 1;
}

def TF_UnsortedSegmentMaxOp : TF_Op<"UnsortedSegmentMax", [NoSideEffect]> {
  let summary = "Computes the maximum along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

This operator is similar to the unsorted segment sum operator found
[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the maximum such that:

\\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such
that `segment_ids[j...] == i`.

If the maximum is empty for a given segment ID `i`, it outputs the smallest
possible value for the specific numeric type,
`output[i] = numeric_limits<T>::lowest()`.

If the given segment ID `i` is negative, then the corresponding value is
dropped, and will not be included in the result.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
</div>

For example:

``` python
c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 4,  3, 3, 4],
#       [5,  6, 7, 8]]
```
  }];

  let arguments = (ins
    TF_IntOrFpTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
    TF_I32OrI64Tensor:$num_segments
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;

  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
}

def TF_UnsortedSegmentMinOp : TF_Op<"UnsortedSegmentMin", [NoSideEffect]> {
  let summary = "Computes the minimum along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

This operator is similar to the unsorted segment sum operator found
[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the minimum such that:

\\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such
that `segment_ids[j...] == i`.

If the minimum is empty for a given segment ID `i`, it outputs the largest
possible value for the specific numeric type,
`output[i] = numeric_limits<T>::max()`.

For example:

``` python
c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 1,  2, 2, 1],
#       [5,  6, 7, 8]]
```

If the given segment ID `i` is negative, then the corresponding value is
dropped, and will not be included in the result.
  }];

  let arguments = (ins
    TF_IntOrFpTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
    TF_I32OrI64Tensor:$num_segments
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;

  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
}

def TF_UnsortedSegmentProdOp : TF_Op<"UnsortedSegmentProd", [NoSideEffect]> {
  let summary = "Computes the product along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

This operator is similar to the unsorted segment sum operator found
[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the product of all
entries belonging to a segment such that:

\\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
`j...` such that `segment_ids[j...] == i`.

For example:

``` python
c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 4,  6, 6, 4],
#       [5,  6, 7, 8]]
```

If there is no entry for a given segment ID `i`, it outputs 1.

If the given segment ID `i` is negative, then the corresponding value is
dropped, and will not be included in the result.
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
    TF_I32OrI64Tensor:$num_segments
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;

  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
}

def TF_UnsortedSegmentSumOp : TF_Op<"UnsortedSegmentSum", [NoSideEffect]> {
  let summary = "Computes the sum along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

Computes a tensor such that
\\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
need not be sorted and need not cover all values in the full
range of valid values.

If the sum is empty for a given segment ID `i`, `output[i] = 0`.
If the given segment ID `i` is negative, the value is dropped and will not be
added to the sum of the segment.

`num_segments` should equal the number of distinct segment IDs.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
</div>

``` python
c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 5, 5, 5, 5],
#       [5, 6, 7, 8]]
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
    TF_I32OrI64Tensor:$num_segments
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;

  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
}

def TF_UpperBoundOp : TF_Op<"UpperBound", [NoSideEffect]> {
  let summary = [{
Applies upper_bound(sorted_search_values, values) along each row.
  }];

  let description = [{
Each set of rows with the same index in (sorted_inputs, values) is treated
independently.  The resulting row is the equivalent of calling
`np.searchsorted(sorted_inputs, values, side='right')`.

The result is not a global index to the entire
`Tensor`, but rather just the index in the last dimension.

A 2-D example:
  sorted_sequence = [[0, 3, 9, 9, 10],
                     [1, 2, 3, 4, 5]]
  values = [[2, 4, 9],
            [0, 2, 6]]

  result = UpperBound(sorted_sequence, values)

  result == [[1, 2, 4],
             [0, 2, 5]]
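
The same row-wise computation in NumPy (an illustrative sketch):

``` python
import numpy as np

sorted_sequence = np.array([[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]])
values = np.array([[2, 4, 9], [0, 2, 6]])
# Apply searchsorted independently to each row pair.
result = np.stack([np.searchsorted(row, vals, side='right')
                   for row, vals in zip(sorted_sequence, values)])
# result ==> [[1, 2, 4], [0, 2, 5]]
```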
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{2-D Tensor where each row is ordered.}]>:$sorted_inputs,
    Arg<TF_Tensor, [{2-D Tensor with the same number of rows as `sorted_inputs`. Contains
the values that will be searched for in `sorted_inputs`.}]>:$values
  );

  let results = (outs
    Res<TF_I32OrI64Tensor, [{A `Tensor` with the same shape as `values`.  It contains the last scalar index
into the last dimension where values can be inserted without changing the
ordered property.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
}

def TF_VarIsInitializedOp : TF_Op<"VarIsInitializedOp", []> {
  let summary = [{
Checks whether a resource handle-based variable has been initialized.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{the input resource handle.}], [TF_VariableRead]>:$resource
  );

  let results = (outs
    Res<TF_BoolTensor, [{a scalar boolean which is true if the variable has been
initialized.}]>:$is_initialized
  );

  let hasCanonicalizer = 1;
}

def TF_VariableOp : TF_Op<"Variable", []> {
  let summary = "Use VariableV2 instead.";

  let arguments = (ins
    TF_ShapeAttr:$shape,
    DefaultValuedAttr<StrAttr, "">:$container,
    DefaultValuedAttr<StrAttr, "">:$shared_name
  );

  let results = (outs
    TF_Tensor:$ref
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_VariableShapeOp : TF_Op<"VariableShape", []> {
  let summary = "Returns the shape of the variable pointed to by `resource`.";

  let description = [{
This operation returns a 1-D integer tensor representing the shape of `input`.

For example:

```
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
shape(t) ==> [2, 2, 3]
```
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_VariableRead]>:$input
  );

  let results = (outs
    TF_I32OrI64Tensor:$output
  );

  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasFolder = 1;
}

def TF_VariableV2Op : TF_Op<"VariableV2", []> {
  let summary = [{
Holds state in the form of a tensor that persists across steps.
  }];

  let description = [{
Outputs a ref to the tensor state so it may be read or modified.
TODO(zhifengc/mrry): Add a pointer to a more detailed document
about sharing state in TensorFlow.
  }];

  let arguments = (ins
    TF_ShapeAttr:$shape,
    DefaultValuedAttr<StrAttr, "">:$container,
    DefaultValuedAttr<StrAttr, "">:$shared_name
  );

  let results = (outs
    Res<TF_Tensor, [{A reference to the variable tensor.}]>:$ref
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_WhereOp : TF_Op<"Where", [NoSideEffect]> {
  let summary = "Returns locations of nonzero / true values in a tensor.";

  let description = [{
This operation returns the coordinates of true elements in `condition`. The
coordinates are returned in a 2-D tensor where the first dimension (rows)
represents the number of true elements, and the second dimension (columns)
represents the coordinates of the true elements. Keep in mind, the shape of
the output tensor can vary depending on how many true values there are in
`condition`. Indices are output in row-major order.

For example:

```
# 'input' tensor is [[True, False]
#                    [True, False]]
# 'input' has two true values, so output has two coordinates.
# 'input' has rank of 2, so coordinates have two indices.
where(input) ==> [[0, 0],
                  [1, 0]]

# `condition` tensor is [[[True, False]
#                     [True, False]]
#                    [[False, True]
#                     [False, True]]
#                    [[False, False]
#                     [False, True]]]
# 'input' has 5 true values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]

# `condition` tensor is [[[1.5,  0.0]
#                     [-0.5, 0.0]]
#                    [[0.0,  0.25]
#                     [0.0,  0.75]]
#                    [[0.0,  0.0]
#                     [0.0,  0.01]]]
# 'input' has 5 nonzero values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]

# `condition` tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
#                     [0.0 + 0.5j, 0.0  + 0.0j]]
#                    [[0.0 + 0.0j, 0.25 + 1.5j]
#                     [0.0 + 0.0j, 0.75 + 0.0j]]
#                    [[0.0 + 0.0j, 0.0  + 0.0j]
#                     [0.0 + 0.0j, 0.01 + 0.0j]]]
# 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input
  );

  let results = (outs
    TF_Int64Tensor:$index
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XdivyOp : TF_Op<"Xdivy", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                 WithBroadcastableBinOpBuilder {
  let summary = "Returns 0 if x == 0, and x / y otherwise, elementwise.";
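
  let description = [{
For example (an illustrative sketch, via the Python-level `tf.math.xdivy`):

``` python
tf.math.xdivy(0.0, 0.0)  # ==> 0.0 rather than nan
tf.math.xdivy(3.0, 2.0)  # ==> 1.5
```
  }];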

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$x,
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$y
  );

  let results = (outs
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_XlaBroadcastHelperOp : TF_Op<"XlaBroadcastHelper", [DeclareOpInterfaceMethods<InferTypeOpInterface>, NoSideEffect]> {
  let summary = "Helper operator for performing XLA-style broadcasts";

  let description = [{
Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to
whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules
for binary operators.
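
For example (an illustrative sketch, assuming the op is exposed as
`tf.raw_ops.XlaBroadcastHelper`):

``` python
lhs = tf.ones([3, 4])
rhs = tf.ones([3])
lhs_out, rhs_out = tf.raw_ops.XlaBroadcastHelper(
    lhs=lhs, rhs=rhs, broadcast_dims=[0])
# rhs_out has shape (3, 1): a size-1 dimension is appended so that
# rhs dimension 0 lines up with lhs dimension 0.
```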
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the LHS input tensor}]>:$lhs,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the RHS input tensor}]>:$rhs,
    Arg<TF_I32OrI64Tensor, [{an XLA-style broadcast dimension specification}]>:$broadcast_dims
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the broadcasted LHS tensor}]>:$lhs_output,
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the broadcasted RHS tensor}]>:$rhs_output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;

  let extraClassDeclaration = [{
    // InferTypeOpInterface:
    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
      return ArraysAreCastCompatible(l, r);
    }
  }];
}

def TF_XlaClusterOutputOp : TF_Op<"XlaClusterOutput", [NoSideEffect]> {
  let summary = [{
Operator that connects the output of an XLA computation to other consumer graph nodes.
  }];

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    TF_Tensor:$outputs
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaConvOp : TF_Op<"XlaConv", [NoSideEffect]> {
  let summary = "Wraps the XLA ConvGeneralDilated operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$lhs,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the kernel tensor}]>:$rhs,
    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,
    Arg<TF_I32OrI64Tensor, [{dilation to apply between input elements}]>:$lhs_dilation,
    Arg<TF_I32OrI64Tensor, [{dilation to apply between kernel elements}]>:$rhs_dilation,
    Arg<TF_I32OrI64Tensor, [{number of feature groups for grouped convolution.}]>:$feature_group_count,

    StrAttr:$dimension_numbers,
    StrAttr:$precision_config
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
}

def TF_XlaConvV2Op : TF_Op<"XlaConvV2", [NoSideEffect]> {
  let summary = "Wraps the XLA ConvGeneralDilated operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$lhs,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the kernel tensor}]>:$rhs,
    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,
    Arg<TF_I32OrI64Tensor, [{dilation to apply between input elements}]>:$lhs_dilation,
    Arg<TF_I32OrI64Tensor, [{dilation to apply between kernel elements}]>:$rhs_dilation,
    Arg<TF_I32OrI64Tensor, [{number of feature groups for grouped convolution.}]>:$feature_group_count,

    StrAttr:$dimension_numbers,
    StrAttr:$precision_config
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr LhsT = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr RhsT = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr preferred_element_type = TF_DerivedResultTypeAttr<0>;
}

def TF_XlaDotOp : TF_Op<"XlaDot", [NoSideEffect]> {
  let summary = "Wraps the XLA DotGeneral operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the LHS tensor}]>:$lhs,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the RHS tensor}]>:$rhs,

    StrAttr:$dimension_numbers,
    StrAttr:$precision_config
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaDotV2Op : TF_Op<"XlaDotV2", [NoSideEffect]> {
  let summary = "Wraps the XLA DotGeneral operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the LHS tensor}]>:$lhs,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the RHS tensor}]>:$rhs,

    StrAttr:$dimension_numbers,
    StrAttr:$precision_config
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr LhsT = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr RhsT = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr preferred_element_type = TF_DerivedResultTypeAttr<0>;
}

def TF_XlaDynamicSliceOp : TF_Op<"XlaDynamicSlice", [NoSideEffect]> {
  let summary = "Wraps the XLA DynamicSlice operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice
.

DynamicSlice extracts a sub-array from the input array at dynamic
start_indices. The size of the slice in each dimension is passed in
size_indices, which specify the end point of exclusive slice intervals in each
dimension -- [start, start + size). The shape of start_indices must have rank 1,
with dimension size equal to the rank of operand.
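
For example (an illustrative sketch, assuming the op is exposed as
`tf.raw_ops.XlaDynamicSlice`):

``` python
x = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
y = tf.raw_ops.XlaDynamicSlice(
    input=x, start_indices=[1, 0], size_indices=[2, 2])
# y ==> [[3, 4],
#        [6, 7]]
```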
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{Rank-1 tensor containing the starting index for each dimension of the
slice.}]>:$start_indices,
    Arg<TF_I32OrI64Tensor, [{List of N integers containing the slice size for each
dimension. Each value must be strictly greater than zero, and start + size
must be less than or equal to the size of the dimension to avoid
implementation-defined behavior.}]>:$size_indices
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_XlaDynamicUpdateSliceOp : TF_Op<"XlaDynamicUpdateSlice", [NoSideEffect]> {
  let summary = "Wraps the XLA DynamicUpdateSlice operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice
.

XlaDynamicUpdateSlice generates a result which is the value of the `input`
operand, with a slice update overwritten at `indices`. The shape of `update`
determines the shape of the sub-array of the result which is updated. The shape
of `indices` must have rank 1, with dimension size equal to the rank of `input`.

Handling of out-of-bounds slice indices is implementation-defined.
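
For example (an illustrative sketch, assuming the op is exposed as
`tf.raw_ops.XlaDynamicUpdateSlice`):

``` python
x = tf.zeros([3, 3], tf.int32)
update = tf.ones([2, 2], tf.int32)
y = tf.raw_ops.XlaDynamicUpdateSlice(input=x, update=update, indices=[1, 1])
# y ==> [[0, 0, 0],
#        [0, 1, 1],
#        [0, 1, 1]]
```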
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input,
    Arg<TF_Tensor, [{A `Tensor` of type T. Same rank as `input`.}]>:$update,
    Arg<TF_I32OrI64Tensor, [{A vector of indices into `input`. Must have length equal to the rank of
`input`.}]>:$indices
  );

  let results = (outs
    Res<TF_Tensor, [{A `Tensor` of type T.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
}

def TF_XlaEinsumOp : TF_Op<"XlaEinsum", [NoSideEffect]> {
  let summary = [{
An op which supports a basic einsum operation with 2 inputs and 1 output.
  }];

  let description = [{
This op has better TPU performance since it doesn't have explicit reshape and
transpose operations as tf.einsum does.
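
For example (an illustrative sketch, assuming the op is exposed as
`tf.raw_ops.XlaEinsum`):

``` python
a = tf.ones([2, 3])
b = tf.ones([3, 4])
product = tf.raw_ops.XlaEinsum(a=a, b=b, equation="ab,bc->ac")  # shape (2, 4)
```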
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex64, TF_Float32]>:$a,
    TensorOf<[TF_Bfloat16, TF_Complex64, TF_Float32]>:$b,

    StrAttr:$equation
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex64, TF_Float32]>:$product
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaGatherOp : TF_Op<"XlaGather", [NoSideEffect]> {
  let summary = "Wraps the XLA Gather operator documented at";

  let description = [{
https://www.tensorflow.org/xla/operation_semantics#gather
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The array we're gathering from.}]>:$operand,
    Arg<TF_I32OrI64Tensor, [{Array containing the starting indices of the slices we gather.}]>:$start_indices,
    Arg<TF_I32OrI64Tensor, [{slice_sizes[i] is the bounds for the slice on dimension i.}]>:$slice_sizes,

    StrAttr:$dimension_numbers,
    BoolAttr:$indices_are_sorted
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_XlaKeyValueSortOp : TF_Op<"XlaKeyValueSort", [NoSideEffect]> {
  let summary = "Wraps the XLA Sort operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#sort
.

Sorts a tensor. Currently only sorting in ascending order is supported.
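
For example (an illustrative sketch, assuming the op is exposed as
`tf.raw_ops.XlaKeyValueSort`):

``` python
keys = tf.constant([3, 1, 2])
values = tf.constant([30, 10, 20])
sorted_keys, sorted_values = tf.raw_ops.XlaKeyValueSort(keys=keys, values=values)
# sorted_keys   ==> [1, 2, 3]
# sorted_values ==> [10, 20, 30]
```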
  }];

  let arguments = (ins
    Arg<TF_IntOrFpTensor, [{A `Tensor` of type K.}]>:$keys,
    Arg<TF_Tensor, [{A `Tensor` of type V.}]>:$values
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{A `Tensor` of type K.}]>:$sorted_keys,
    Res<TF_Tensor, [{A `Tensor` of type V.}]>:$sorted_values
  );

  TF_DerivedOperandTypeAttr K = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr V = TF_DerivedOperandTypeAttr<1>;
}

def TF_XlaPadOp : TF_Op<"XlaPad", [NoSideEffect]> {
  let summary = "Wraps the XLA Pad operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#pad
.
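
For example (an illustrative sketch, assuming the op is exposed as
`tf.raw_ops.XlaPad`):

``` python
x = tf.constant([1, 2, 3])
y = tf.raw_ops.XlaPad(
    input=x, padding_value=tf.constant(0),
    padding_low=[1], padding_high=[2], padding_interior=[1])
# y ==> [0, 1, 0, 2, 0, 3, 0, 0]
```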
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input,
    Arg<TF_Tensor, [{A scalar `Tensor` of type T.}]>:$padding_value,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start of each input dimension. Must
be a compile-time constant 1D tensor of length equal to rank of input.}]>:$padding_low,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the end of each input dimension. Must
be a compile-time constant 1D tensor of length equal to rank of input.}]>:$padding_high,
    Arg<TF_I32OrI64Tensor, [{the padding to apply between each input element. Must
be a compile-time constant 1D tensor of length equal to rank of input,
containing only non-negative values.}]>:$padding_interior
  );

  let results = (outs
    Res<TF_Tensor, [{A `Tensor` of type T.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
}

def TF_XlaRecvFromHostOp : TF_Op<"XlaRecvFromHost", []> {
  let summary = "An op to receive a tensor from the host.";

  let description = [{
output: the tensor that will be received from the host.
Toutput: element type for output.
shape: shape for output.
key: A unique identifier for this region used to match up host transfers.
  }];

  let arguments = (ins
    TF_ShapeAttr:$shape,
    StrAttr:$key
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedResultTypeAttr Toutput = TF_DerivedResultTypeAttr<0>;
}

def TF_XlaReduceOp : TF_Op<"XlaReduce", [NoSideEffect]> {
  let summary = "Wraps the XLA Reduce operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#reduce .
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$input,
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a scalar representing the initial value for the reduction}]>:$init_value,

    I64ArrayAttr:$dimensions_to_reduce,
    SymbolRefAttr:$reducer
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaReduceWindowOp : TF_Op<"XlaReduceWindow", [NoSideEffect]> {
  let summary = "Wraps the XLA ReduceWindow operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$input,
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a scalar representing the initial value for the reduction}]>:$init_value,
    Arg<TF_I32OrI64Tensor, [{the shape of the window}]>:$window_dimensions,
    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
    TF_I32OrI64Tensor:$base_dilations,
    TF_I32OrI64Tensor:$window_dilations,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,

    SymbolRefAttr:$computation
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
}

def TF_XlaRemoveDynamicDimensionSizeOp : TF_Op<"XlaRemoveDynamicDimensionSize", [NoSideEffect]> {
  let summary = "Inverse of XlaSetDynamicDimensionSize.";

  let description = [{
Make an XLA bounded dynamic dimension into a static dimension. The bound of the
size of dimension `dim_index` becomes the static dimension size.
  }];

  let arguments = (ins
    TF_Tensor:$input,
    TF_Int32Tensor:$dim_index
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaReplicaIdOp : TF_Op<"XlaReplicaId", [NoSideEffect, TF_NoConstantFold]> {
  let summary = "Replica ID.";

  let arguments = (ins);

  let results = (outs
    TF_Int32Tensor:$id
  );

  // Constant folding is disabled for this op as it is a runtime op and can't
  // be constant folded at compile time.
}

def TF_XlaRngBitGeneratorOp : TF_Op<"XlaRngBitGenerator", [NoSideEffect]> {
  let summary = "Stateless PRNG bit generator.";

  let description = [{
Wraps the XLA RngBitGenerator operator, documented at
 https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator.
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{The PRNG algorithm to use, one of
tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}.}]>:$algorithm,
    Arg<TF_Uint64Tensor, [{Initial state for the PRNG algorithm. For THREEFRY, it should be
a u64[2] and for PHILOX a u64[3].}]>:$initial_state,
    Arg<TF_I32OrI64Tensor, [{The output shape of the generated data.}]>:$shape
  );

  let results = (outs
    TF_Uint64Tensor:$output_key,
    TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<1>;
}

def TF_XlaScatterOp : TF_Op<"XlaScatter", [NoSideEffect]> {
  let summary = "Wraps the XLA Scatter operator documented at";

  let description = [{
https://www.tensorflow.org/xla/operation_semantics#scatter.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Array to be scattered into.}]>:$operand,
    Arg<TF_I32OrI64Tensor, [{Array containing the starting indices of the slices that must
be scattered to.}]>:$scatter_indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Array containing the values that must be used for scattering.}]>:$updates,

    SymbolRefAttr:$update_computation,
    StrAttr:$dimension_numbers,
    BoolAttr:$indices_are_sorted
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_XlaSelectAndScatterOp : TF_Op<"XlaSelectAndScatter", [NoSideEffect]> {
  let summary = "Wraps the XLA SelectAndScatter operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#selectandscatter
.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$operand,
    Arg<TF_I32OrI64Tensor, [{the shape of the window}]>:$window_dimensions,
    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a tensor of values to scatter}]>:$source,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a scalar representing the initial value for the output tensor}]>:$init_value,

    SymbolRefAttr:$select,
    SymbolRefAttr:$scatter
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
}

def TF_XlaSelfAdjointEigOp : TF_Op<"XlaSelfAdjointEig", [NoSideEffect]> {
  let summary = [{
Computes the eigen decomposition of a batch of self-adjoint matrices
  }];

  let description = [{
(Note: Only real inputs are supported).

Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in
tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], for
i=0...N-1.
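
A NumPy sanity check of the stated identity (illustrative only):

``` python
import numpy as np

A = np.array([[2.0, 1.0], [1.0, 2.0]])  # real symmetric (self-adjoint)
e, v = np.linalg.eigh(A)                # eigenvalues in ascending order
assert np.allclose(A @ v[:, 0], e[0] * v[:, 0])
```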
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor.}]>:$a,

    BoolAttr:$lower,
    I64Attr:$max_iter,
    F32Attr:$epsilon
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The eigenvalues in ascending order, each repeated according to its
multiplicity.}]>:$w,
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The column v[..., :, i] is the normalized eigenvector corresponding to the
eigenvalue w[..., i].}]>:$v
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaSendToHostOp : TF_Op<"XlaSendToHost", []> {
  let summary = "An op to send a tensor to the host.";

  let description = [{
input: the tensor that will be sent to the host.
Tinput: element type for input.
key: A unique identifier for this region used to match up host transfers.
  }];

  let arguments = (ins
    TF_Tensor:$input,

    StrAttr:$key
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr Tinput = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaSetDynamicDimensionSizeOp : TF_Op<"XlaSetDynamicDimensionSize", [DeclareOpInterfaceMethods<InferTypeOpInterface>, NoSideEffect, TF_NoConstantFold]> {
  let summary = "Make a static dimension into an XLA bounded dynamic dimension.";

  let description = [{
The current static dimension size will become the bound and the second
operand becomes the dynamic size of the dimension.
  }];

  let arguments = (ins
    TF_Tensor:$input,
    TF_Int32Tensor:$dim_index,
    TF_Int32Tensor:$size
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let extraClassDeclaration = [{
    // InferTypeOpInterface:
    static bool isCompatibleReturnTypes(TypeRange l, TypeRange r) {
      return ArraysAreCastCompatible(l, r);
    }
  }];
}

def TF_XlaSortOp : TF_Op<"XlaSort", [NoSideEffect]> {
  let summary = "Wraps the XLA Sort operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#sort
.

Sorts a tensor. Currently only sorting in ascending order is supported.
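
For example (an illustrative sketch, assuming the op is exposed as
`tf.raw_ops.XlaSort`):

``` python
y = tf.raw_ops.XlaSort(input=tf.constant([3, 1, 2]))
# y ==> [1, 2, 3]
```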
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input
  );

  let results = (outs
    Res<TF_Tensor, [{A `Tensor` of type T.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaSvdOp : TF_Op<"XlaSvd", [NoSideEffect]> {
  let summary = [{
Computes the singular value decomposition of a batch of matrices
  }];

  let description = [{
(Note: Only real inputs are supported).

Computes the singular values and the left/right singular vectors of the innermost
M-by-N matrices in tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[...,:,:]).
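
A NumPy analogue of the stated factorization (illustrative only):

``` python
import numpy as np

A = np.arange(12.0).reshape(4, 3)
u, s, vt = np.linalg.svd(A, full_matrices=False)  # vt is Transpose(v)
assert np.allclose(A, u @ np.diag(s) @ vt)
```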
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor.}]>:$a,

    I64Attr:$max_iter,
    F32Attr:$epsilon,
    StrAttr:$precision_config
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Singular values. The values are sorted in reverse order of magnitude, so
s[..., 0] is the largest value, s[..., 1] is the second largest, etc.}]>:$s,
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Left singular vectors.}]>:$u,
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Right singular vectors.}]>:$v
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaVariadicReduceOp : TF_Op<"XlaVariadicReduce", [NoSideEffect, SameVariadicOperandSize]> {
  let summary = "Wraps the variadic XLA Reduce operator.";

  let description = [{
Semantics are documented at
 https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce.

This version is limited to operands of the same dtype.
XlaVariadicReduceV2 is a version that supports heterogeneous operands.
  }];

  let arguments = (ins
    Arg<Variadic<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>>, [{the input tensor(s)}]>:$input,
    Arg<Variadic<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>>, [{scalar initial value(s) for the reduction}]>:$init_value,

    I64ArrayAttr:$dimensions_to_reduce,
    SymbolRefAttr:$reducer
  );

  let results = (outs
    Variadic<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>>:$output
  );

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaVariadicReduceV2Op : TF_Op<"XlaVariadicReduceV2", [AttrSizedOperandSegments, NoSideEffect]> {
  let summary = "Wraps the variadic XLA Reduce operator.";

  let description = [{
Semantics are documented at
 https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce.

This is an expanded version of XlaVariadicReduce, with support for
operands of different dtypes, and improved shape inference.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{the input tensor(s)}]>:$inputs,
    Arg<Variadic<TF_Tensor>, [{scalar initial value(s) for the reduction}]>:$init_values,

    I64ArrayAttr:$dimensions_to_reduce,
    SymbolRefAttr:$reducer
  );

  let results = (outs
    Variadic<TF_Tensor>:$outputs
  );

  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<0>;
}

def TF_XlaVariadicSortOp : TF_Op<"XlaVariadicSort", [NoSideEffect]> {
  let summary = "Wraps the XLA Sort operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#sort
.

Sorts one or more tensors, with support for custom comparator, dimension, and
is_stable attributes.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{A list of `Tensor` of identical shape but possibly different types.}]>:$inputs,
    Arg<TF_Int32Tensor, [{The dimension along which to sort. Must be a compile-time constant.}]>:$dimension,

    SymbolRefAttr:$comparator,
    BoolAttr:$is_stable
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{A list of `Tensor` of same shape and types as the `input`.}]>:$outputs
  );

  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<0>;
}

def TF_Xlog1pyOp : TF_Op<"Xlog1py", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise.";
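
  let description = [{
For example (an illustrative sketch, via the Python-level `tf.math.xlog1py`):

``` python
tf.math.xlog1py(0.0, -1.0)  # ==> 0.0, even though log1p(-1.0) is -inf
tf.math.xlog1py(1.0, 1.0)   # ==> log(2) ~= 0.6931
```
  }];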

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$x,
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$y
  );

  let results = (outs
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlogyOp : TF_Op<"Xlogy", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                 WithBroadcastableBinOpBuilder {
  let summary = "Returns 0 if x == 0, and x * log(y) otherwise, elementwise.";
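
  let description = [{
For example (an illustrative sketch, via the Python-level `tf.math.xlogy`):

``` python
tf.math.xlogy(0.0, 0.0)  # ==> 0.0, even though log(0.0) is -inf
tf.math.xlogy(2.0, 4.0)  # ==> 2.0 * log(4.0) ~= 2.7726
```
  }];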

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$x,
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$y
  );

  let results = (outs
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ZerosLikeOp : TF_Op<"ZerosLike", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Returns a tensor of zeros with the same shape and type as x.";

  let arguments = (ins
    Arg<TF_Tensor, [{a tensor of type T.}]>:$x
  );

  let results = (outs
    Res<TF_Tensor, [{a tensor of the same shape and type as x but filled with zeros.}]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ZetaOp : TF_Op<"Zeta", [NoSideEffect, ResultsBroadcastableShape]>,
                WithBroadcastableBinOpBuilder {
  let summary = [{
Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
  }];

  let description = [{
The Hurwitz zeta function is defined as:


\\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
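
For example (illustrative, via the Python-level `tf.math.zeta`; with q = 1 the
Hurwitz zeta function reduces to the Riemann zeta function):

``` python
tf.math.zeta(2.0, 1.0)  # ==> pi**2 / 6 ~= 1.6449
```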
  }];

  let arguments = (ins
    TF_F32OrF64Tensor:$x,
    TF_F32OrF64Tensor:$q
  );

  let results = (outs
    TF_F32OrF64Tensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF__ArrayToListOp : TF_Op<"_ArrayToList", [NoSideEffect]> {
  let summary = "Converts an array of tensors to a list of tensors.";

  let arguments = (ins
    Variadic<TF_Tensor>:$input
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeListAttr out_types = TF_DerivedResultTypeListAttr<0>;
}

def TF__EagerConstOp : TF_Op<"_EagerConst", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF__FusedBatchNormExOp : TF_Op<"_FusedBatchNormEx", [NoSideEffect]> {
  let summary = "Internal FusedBatchNorm operation: reserved for internal use.";

  let description = [{
Do not invoke this operator directly in Python. A fusion optimization is
expected to create these operators.
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$x,
    TF_Float32Tensor:$scale,
    TF_Float32Tensor:$offset,
    TF_Float32Tensor:$mean,
    TF_Float32Tensor:$variance,
    Variadic<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>>:$side_input,

    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
    DefaultValuedAttr<StrAttr, "Identity">:$activation_mode,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<BoolAttr, "true">:$is_training
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$y,
    TF_Float32Tensor:$batch_mean,
    TF_Float32Tensor:$batch_variance,
    TF_Float32Tensor:$reserve_space_1,
    TF_Float32Tensor:$reserve_space_2,
    TF_Float32Tensor:$reserve_space_3
  );

  TF_DerivedOperandSizeAttr num_side_inputs = TF_DerivedOperandSizeAttr<5>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<1>;
}

def TF__FusedConv2DOp : TF_Op<"_FusedConv2D", [NoSideEffect]> {
  let summary = [{
Performs a convolution followed by a specified series of operations.
  }];

  let description = [{
The inputs to the convolution are `input` and `filter`. The series of operations
that follows is specified by the `fused_ops` attribute, which is a list of TF op
names specified as strings (e.g. "Relu"). They are performed in order, where the
(first) input to each op is the output of the preceding op. The first input and
the output of each fused_op must be of type T.

Currently supported fused_op combinations are: [X] and [X,A], where X is one of
{"BiasAdd","FusedBatchNorm"} and A is one of {"Elu","Relu","Relu6"}.

* The first input to op X is the Conv2D result, and the additional input(s) to X
are specified by `args`.
* If there is an op A specified, the output of op X is the input to op A, and op
A produces the _FusedConv2D output. Otherwise, op X produces the _FusedConv2D
output.

*NOTE*: Do not invoke this operator directly in Python. Grappler is expected to
create these operators.
  }];

  let arguments = (ins
    TF_F32OrF64Tensor:$input,
    TF_F32OrF64Tensor:$filter,
    Variadic<TF_F32OrF64Tensor>:$args,

    I64ArrayAttr:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations,
    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
    DefaultValuedAttr<StrArrayAttr, "{}">:$fused_ops,
    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
    DefaultValuedAttr<F32Attr, "0.2f">:$leakyrelu_alpha
  );

  let results = (outs
    TF_F32OrF64Tensor:$output
  );

  TF_DerivedOperandSizeAttr num_args = TF_DerivedOperandSizeAttr<2>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF__FusedMatMulOp : TF_Op<"_FusedMatMul", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = [{
Performs a MatMul followed by a specified series of operations.
  }];

  let description = [{
The inputs to the MatMul are specified by `a` and `b`. The series of operations
that follows is specified by the `fused_ops` attribute, which is a list of TF op
names specified as strings (e.g. "Relu"). They are performed in order, where the
(first) input to each op is the output of the preceding op. The first input and
the output of each fused_op must be of type T.

19861Currently supported fused_op combinations are: ["BiasAdd"] and ["BiasAdd",A],
19862where A is one of {"Elu","Relu","Relu6"}.
19863
19864* The first input to BiasAdd is the Conv2D result, and the additional BiasAdd
19865input is specified by `args`.
19866* If there is an op A specified, the output of the BiasAdd is the input to op A,
19867and op A produces the _FusedConv2D output. Otherwise, the BiasAdd produces the
19868_FusedConv2D output.
19869
19870*NOTE*: Do not invoke this operator directly in Python. Grappler is
19871expected to create these operators.
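
For example, a MatMul fused with `BiasAdd` and `Relu` might look as follows in
generic MLIR form; names and shapes are illustrative only:

```mlir
// %bias is the single extra `args` operand consumed by the fused BiasAdd.
%product = "tf._FusedMatMul"(%a, %b, %bias) {
  transpose_a = false, transpose_b = false, fused_ops = ["BiasAdd", "Relu"]
} : (tensor<8x16xf32>, tensor<16x4xf32>, tensor<4xf32>) -> tensor<8x4xf32>
```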
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float32]>:$a,
    TensorOf<[TF_Bfloat16, TF_Float32]>:$b,
    Variadic<TensorOf<[TF_Bfloat16, TF_Float32]>>:$args,

    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
    DefaultValuedAttr<BoolAttr, "false">:$transpose_b,
    DefaultValuedAttr<StrArrayAttr, "{}">:$fused_ops,
    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
    DefaultValuedAttr<F32Attr, "0.2f">:$leakyrelu_alpha
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Float32]>:$product
  );

  TF_DerivedOperandSizeAttr num_args = TF_DerivedOperandSizeAttr<2>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF__HostRecvOp : TF_Op<"_HostRecv", []> {
  let summary = "Receives the named tensor from send_device on recv_device.";

  let description = [{
_HostRecv produces its output on host memory whereas _Recv produces its
output on device memory.
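
A minimal generic-form sketch; the device names, incarnation value, and result
type are illustrative only:

```mlir
%tensor = "tf._HostRecv"() {
  tensor_name = "edge_1", send_device = "/job:worker/task:0/device:GPU:0",
  send_device_incarnation = 1 : i64,
  recv_device = "/job:worker/task:0/device:CPU:0"
} : () -> tensor<f32>
```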
  }];

  let arguments = (ins
    StrAttr:$tensor_name,
    StrAttr:$send_device,
    I64Attr:$send_device_incarnation,
    StrAttr:$recv_device,
    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
  );

  let results = (outs
    Res<TF_Tensor, [{The tensor to receive.}]>:$tensor
  );

  TF_DerivedResultTypeAttr tensor_type = TF_DerivedResultTypeAttr<0>;
}

def TF__HostSendOp : TF_Op<"_HostSend", []> {
  let summary = "Sends the named tensor from send_device to recv_device.";

  let description = [{
_HostSend requires its input on host memory whereas _Send requires its
input on device memory.
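
A minimal generic-form sketch, mirroring the _HostRecv example above; the
device names and incarnation value are illustrative only:

```mlir
"tf._HostSend"(%tensor) {
  tensor_name = "edge_1", send_device = "/job:worker/task:0/device:CPU:0",
  send_device_incarnation = 1 : i64,
  recv_device = "/job:worker/task:0/device:GPU:0"
} : (tensor<f32>) -> ()
```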
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{The tensor to send.}]>:$tensor,

    StrAttr:$tensor_name,
    StrAttr:$send_device,
    I64Attr:$send_device_incarnation,
    StrAttr:$recv_device,
    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF__ListToArrayOp : TF_Op<"_ListToArray", [NoSideEffect]> {
  let summary = "Converts a list of tensors to an array of tensors.";

  let arguments = (ins
    Variadic<TF_Tensor>:$input
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultSizeAttr N = TF_DerivedResultSizeAttr<0>;
  TF_DerivedResultTypeAttr T = TF_DerivedResultTypeAttr<0>;
}

def TF__RecvTPUEmbeddingActivationsOp : TF_Op<"_RecvTPUEmbeddingActivations", [TF_TPUEmbeddingSideEffect]> {
  let summary = "An op that receives embedding activations on the TPU.";

  let description = [{
The TPU system performs the embedding lookups and aggregations. The results of
these aggregations are visible to the TensorFlow graph as the outputs of a
_RecvTPUEmbeddingActivations op. This op returns a list containing one
Tensor of activations per table specified in the model.
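
A minimal generic-form sketch for a model with two embedding tables, with the
deduplication data produced by _RecvTPUEmbeddingDeduplicationData (the shapes
and the empty `config` are illustrative; a real `config` is a serialized
TPUEmbeddingConfiguration proto):

```mlir
%dedup = "tf._RecvTPUEmbeddingDeduplicationData"() {config = ""}
    : () -> tensor<!tf.variant>
%activations:2 = "tf._RecvTPUEmbeddingActivations"(%dedup) {config = ""}
    : (tensor<!tf.variant>) -> (tensor<128x64xf32>, tensor<128x16xf32>)
```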
  }];

  let arguments = (ins
    Arg<TF_VariantTensor, [{A Tensor with type=DT_VARIANT containing the deduplication
data. The tensor is an XLA nested tuple containing N elements (where N is
the ratio of the number of embedding to tensor cores per TPU chip). Each
element of the nested tuple is a tuple of rank 1 tensors. Each tensor either
contains indices (DT_UINT32) for embedding lookup on the TensorCore or
weights (DT_FLOAT) to apply to the output of the embedding lookup operation.}]>:$deduplication_data,

    StrAttr:$config
  );

  let results = (outs
    Res<Variadic<TF_Float32Tensor>, [{A TensorList of embedding activations containing one Tensor per
embedding table in the model.}]>:$outputs
  );

  TF_DerivedResultSizeAttr num_tables = TF_DerivedResultSizeAttr<0>;
}

def TF__RecvTPUEmbeddingDeduplicationDataOp : TF_Op<"_RecvTPUEmbeddingDeduplicationData", []> {
  let summary = [{
Receives deduplication data (indices and weights) from the embedding core.
  }];

  let description = [{
The deduplication data is a Tensor with type=DT_VARIANT. The tensor itself is an
XLA nested tuple containing N elements (where N is the ratio of the number of
embedding to tensor cores per TPU chip). Each element of the nested tuple is a
tuple of rank 1 tensors. Each tensor either contains indices (DT_UINT32) for
embedding lookup on the TensorCore or weights (DT_FLOAT) to apply to the output
of the embedding lookup operation.
  }];

  let arguments = (ins
    StrAttr:$config
  );

  let results = (outs
    TF_VariantTensor:$output
  );
}

def TF__SendTPUEmbeddingGradientsOp : TF_Op<"_SendTPUEmbeddingGradients", [AttrSizedOperandSegments, TF_TPUEmbeddingSideEffect]> {
  let summary = "An op that performs gradient updates of embedding tables.";

  let description = [{
The gradients argument is a TensorList having the same length and shapes as the
return value of _RecvTPUEmbeddingActivations, but contains gradients of the
model's loss with respect to the embedding activations. The embedding tables are
updated from these gradients via the optimizer specified in the
TPUEmbeddingConfiguration proto given to tpu.initialize_system.
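
A minimal generic-form sketch continuing the two-table example under
_RecvTPUEmbeddingActivations (values are illustrative; `operand_segment_sizes`
is required by the AttrSizedOperandSegments trait to delimit the two variadic
operand lists):

```mlir
// Two gradient tensors, one dynamic learning rate, one deduplication tensor.
"tf._SendTPUEmbeddingGradients"(%grad0, %grad1, %lr, %dedup) {
  config = "", operand_segment_sizes = dense<[2, 1, 1]> : vector<3xi32>
} : (tensor<128x64xf32>, tensor<128x16xf32>, tensor<f32>, tensor<!tf.variant>)
    -> ()
```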
  }];

  let arguments = (ins
    Arg<Variadic<TF_Float32Tensor>, [{A TensorList of gradients with which to update embedding tables.}]>:$gradients,
    Arg<Variadic<TF_Float32Tensor>, [{A TensorList of learning rates used for updating the embedding
tables via the optimizer. The length of the TensorList must be equal to the
number of dynamic learning rate tags specified in the
TPUEmbeddingConfiguration proto.}]>:$learning_rates,
    Arg<TF_VariantTensor, [{A Tensor with type=DT_VARIANT containing the deduplication
data. The tensor is an XLA nested tuple containing N elements (where N is
the ratio of the number of embedding to tensor cores per TPU chip). Each
element of the nested tuple is a tuple of rank 1 tensors. Each tensor either
contains indices (DT_UINT32) for embedding lookup on the TensorCore or
weights (DT_FLOAT) to apply to the output of the embedding lookup operation.}]>:$deduplication_data,

    StrAttr:$config
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr NumLearningRateTags = TF_DerivedOperandSizeAttr<1>;
  TF_DerivedOperandSizeAttr NumTables = TF_DerivedOperandSizeAttr<0>;
}

def TF__TPUCompileMlirOp : TF_Op<"_TPUCompileMlir", []> {
  let summary = [{
Compiles computations for execution on one or more TPU devices.
  }];

  let description = [{
For the internal use of the distributed TPU compiler.

'mlir_module' is a serialized MLIR module with a `main` function that contains
the target computation.
'dynamic_shapes' contains dynamic shapes of arguments whose shapes were not
known statically at TPUReplication rewrite time.
'metadata' is a serialized TPUCompileMetadataProto describing the shapes and
types of the inputs to the computation, as well as a mapping onto the TPU pod
topology.
'program' output is a string key that is passed to the TPUExecute op and used to
look up the program in the compilation cache.
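
A minimal generic-form sketch for a single computation with no dynamic shapes;
the attribute payloads, elided here as "...", stand in for the serialized MLIR
module and metadata proto:

```mlir
%status, %program = "tf._TPUCompileMlir"() {
  mlir_module = "...", metadata = "..."
} : () -> (tensor<!tf.string>, tensor<!tf.string>)
```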
  }];

  let arguments = (ins
    Variadic<TF_Int64Tensor>:$dynamic_shapes,

    DefaultValuedAttr<StrAttr, "">:$mlir_module,
    StrAttr:$metadata
  );

  let results = (outs
    TF_StrTensor:$compilation_status,
    Variadic<TF_StrTensor>:$program
  );

  TF_DerivedOperandSizeAttr NumDynamicShapes = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedResultSizeAttr num_computations = TF_DerivedResultSizeAttr<1>;
}

def TF__TPUCompileMlirPlaceholderProgramKeyOp : TF_Op<"_TPUCompileMlirPlaceholderProgramKey", []> {
  let summary = [{
Placeholder program key (compilation cache key) of a _TPUCompileMlir `program`.
  }];

  let description = [{
This op can be used when certain rewrite passes materialize ops that require a
program key but the _TPUCompileMlir op has not been added yet. Subsequent
rewrite passes must replace this op with a _TPUCompileMlir op `program` output.
  }];

  let arguments = (ins);

  let results = (outs
    TF_StrTensor:$program
  );
}

def TF__UnaryOpsCompositionOp : TF_Op<"_UnaryOpsComposition", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Applies a sequence of unary ops, specified by `op_names`, to `x`.";

  let description = [{
*NOTE*: Do not invoke this operator directly in Python. A graph rewrite pass is
expected to create these operators.
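
For example, with `op_names = ["Abs", "Tanh"]` the listed ops are applied in
sequence (assumed here to be left to right), i.e. `y = tanh(abs(x))`. A
generic-form sketch with an illustrative shape:

```mlir
%y = "tf._UnaryOpsComposition"(%x) {op_names = ["Abs", "Tanh"]}
    : (tensor<4xf32>) -> tensor<4xf32>
```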
  }];

  let arguments = (ins
    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$x,

    StrArrayAttr:$op_names
  );

  let results = (outs
    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF__XlaHostComputeMlirOp : TF_Op<"_XlaHostComputeMlir", []> {
  let summary = [{
A pseudo-op to represent host-side computation in an XLA program.
  }];
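
  let description = [{
The `send_key` and `recv_key` attributes identify the channels used to transfer
`inputs` to the host and `outputs` back to the device. A generic-form sketch
(the keys, types, and empty host module are illustrative only):

```mlir
%outputs = "tf._XlaHostComputeMlir"(%inputs) {
  send_key = "host_compute_channel_0_args",
  recv_key = "host_compute_channel_0_retvals",
  tpu_core = 0 : i64, host_mlir_module = ""
} : (tensor<f32>) -> tensor<f32>
```
  }];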

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the host.}]>:$inputs,

    StrAttr:$send_key,
    StrAttr:$recv_key,
    DefaultValuedAttr<I64Attr, "0">:$tpu_core,
    DefaultValuedAttr<StrAttr, "">:$host_mlir_module
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{A list of tensors that will be returned to the device.}]>:$outputs
  );

  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;

  let extraClassDeclaration = [{
    FuncOp GetHostFunc(mlir::OwningModuleRef* mlir_module);
  }];

  let verifier = [{ return Verify(*this); }];
}

def TF__XlaRecvAtHostOp : TF_Op<"_XlaRecvAtHost", []> {
  let summary = [{
A placeholder op to receive values from a running XLA computation.
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
execution the transfer corresponds to.}]>:$dynamic_key,

    StrAttr:$key,
    I64Attr:$device_ordinal
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{A list of tensors that will be received from the XLA computation.}]>:$outputs
  );

  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
}

def TF__XlaRecvAtHostV2Op : TF_Op<"_XlaRecvAtHostV2", []> {
  let summary = [{
A placeholder op to receive values from a running XLA computation with support for a runtime device ordinal.
  }];
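
  let description = [{
Unlike _XlaRecvAtHost, the device ordinal is a runtime operand rather than an
attribute. A generic-form sketch (the key, types, and shapes are illustrative
only; the dynamic key and device ordinal are produced at runtime):

```mlir
%outputs = "tf._XlaRecvAtHostV2"(%dynamic_key, %device_ordinal)
    {key = "host_compute_channel_0_args"}
    : (tensor<!tf.string>, tensor<i64>) -> tensor<4xf32>
```
  }];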

  let arguments = (ins
    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
execution the transfer corresponds to.}]>:$dynamic_key,
    Arg<TF_Int64Tensor, [{The device id relative to the associated host device.}]>:$device_ordinal,

    StrAttr:$key
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{A list of tensors that will be received from the XLA computation.}]>:$outputs
  );

  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
}

def TF__XlaSendFromHostOp : TF_Op<"_XlaSendFromHost", []> {
  let summary = "A placeholder op to send values to a running XLA computation.";

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the XLA computation.}]>:$inputs,
    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
execution the transfer corresponds to.}]>:$dynamic_key,

    StrAttr:$key,
    I64Attr:$device_ordinal
  );

  let results = (outs);

  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
}

def TF__XlaSendFromHostV2Op : TF_Op<"_XlaSendFromHostV2", []> {
  let summary = [{
A placeholder op to send values to a running XLA computation with support for a runtime device ordinal.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the XLA computation.}]>:$inputs,
    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
execution the transfer corresponds to.}]>:$dynamic_key,
    Arg<TF_Int64Tensor, [{The device id relative to the associated host device.}]>:$device_ordinal,

    StrAttr:$key
  );

  let results = (outs);

  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
}
