/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

// This is the auto-generated operation definition file for TensorFlow.
//
// PLEASE DO NOT MANUALLY EDIT THIS FILE!
//
// If you absolutely need to modify the generated fields of an op, move the op
// definition to `tf_ops.td` and perform the modification there.
//
// This file contains TensorFlow ops whose definitions are programmatically
// generated from the api-def-files in the following folder:
// tensorflow/core/api_def/base_api
// The generated fields for an op include name, summary, description, traits,
// arguments, results, derived attributes. Therefore, modifications to these
// fields will NOT be respected upon subsequent refreshes. However, additional
// fields after those fields will be retained.
//
// Ops in this file are sorted alphabetically.

include "tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td"
include "mlir/Interfaces/InferTypeOpInterface.td"

def TF_AbsOp : TF_Op<"Abs", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes the absolute value of a tensor.";

  let description = [{
Given a tensor `x`, this operation returns a tensor containing the absolute
value of each element in `x`. For example, if x is an input element and y is
an output element, this operation computes \\(y = |x|\\).
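
For example (an illustrative snippet; `tf.math.abs` is the public Python
wrapper that lowers to this op for real-valued inputs):

```python
x = tf.constant([-2.25, 0.0, 3.5])
tf.math.abs(x)  # ==> [2.25, 0.0, 3.5]
```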
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AcosOp : TF_Op<"Acos", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes acos of x element-wise.";

  let description = [{
Provided an input tensor, the `tf.math.acos` operation returns the inverse
cosine of each element of the tensor. If `y = tf.math.cos(x)`, then
`x = tf.math.acos(y)`.

  Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
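
  For example (an illustrative snippet, assuming a float32 input):

  ```python
  x = tf.constant([1.0, 0.0, -1.0])
  tf.math.acos(x)  # ==> [0., 1.5707964, 3.1415927]
  ```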
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AcoshOp : TF_Op<"Acosh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes inverse hyperbolic cosine of x element-wise.";

  let description = [{
Given an input tensor, the function computes inverse hyperbolic cosine of every element.
Input range is `[1, inf]`. It returns `nan` if the input lies outside the range.

```python
x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")])
tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf]
```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AddNOp : TF_Op<"AddN", [Commutative, NoSideEffect]> {
  let summary = "Add all input tensors element-wise.";

  let description = [{
Inputs must be of the same size and shape.

  ```python
  x = [9, 7, 10]
  tf.math.add_n(x) ==> 26
  ```
  }];

  let arguments = (ins
    Variadic<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8, TF_Variant]>>:$inputs
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8, TF_Variant]>:$sum
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;

  let hasFolder = 1;
}

def TF_AdjustContrastv2Op : TF_Op<"AdjustContrastv2", [NoSideEffect]> {
  let summary = "Adjust the contrast of one or more images.";

  let description = [{
`images` is a tensor of at least 3 dimensions.  The last 3 dimensions are
interpreted as `[height, width, channels]`.  The other dimensions only
represent a collection of images, such as `[batch, height, width, channels]`.

Contrast is adjusted independently for each channel of each image.

For each channel, the Op first computes the mean of the image pixels in the
channel and then adjusts each component of each pixel to
`(x - mean) * contrast_factor + mean`.
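
For example (an illustrative snippet using the public `tf.image.adjust_contrast`
wrapper; the values follow from the formula above, with a channel mean of 1.5):

```python
images = tf.reshape(tf.range(4.0), [1, 2, 2, 1])  # pixel values 0..3
tf.image.adjust_contrast(images, contrast_factor=2.0)
# ==> [[[[-1.5], [0.5]], [[2.5], [4.5]]]]
```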
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
    Arg<TF_Float32Tensor, [{A float multiplier for adjusting contrast.}]>:$contrast_factor
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The contrast-adjusted image or images.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AdjustHueOp : TF_Op<"AdjustHue", [NoSideEffect]> {
  let summary = "Adjust the hue of one or more images.";

  let description = [{
`images` is a tensor of at least 3 dimensions.  The last dimension is
interpreted as channels, and must be three.

The input image is considered in the RGB colorspace. Conceptually, the RGB
colors are first mapped into HSV. A delta is then applied to all the hue
values, and the result is mapped back to RGB colorspace.
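
For example (an illustrative snippet using the public `tf.image.adjust_hue`
wrapper; a delta of 1/3 rotates a pure-red pixel to pure green):

```python
image = tf.constant([[[1.0, 0.0, 0.0]]])  # one RGB pixel, pure red
tf.image.adjust_hue(image, delta=1.0 / 3.0)
# ==> [[[0., 1., 0.]]]
```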
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
    Arg<TF_Float32Tensor, [{A float delta to add to the hue.}]>:$delta
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The hue-adjusted image or images.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AdjustSaturationOp : TF_Op<"AdjustSaturation", [NoSideEffect]> {
  let summary = "Adjust the saturation of one or more images.";

  let description = [{
`images` is a tensor of at least 3 dimensions.  The last dimension is
interpreted as channels, and must be three.

The input image is considered in the RGB colorspace. Conceptually, the RGB
colors are first mapped into HSV. A scale is then applied to all the saturation
values, and the result is mapped back to RGB colorspace.
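
For example (an illustrative snippet using the public
`tf.image.adjust_saturation` wrapper; doubling the saturation of a
half-saturated red pixel yields a fully saturated one):

```python
image = tf.constant([[[0.5, 0.25, 0.25]]])  # one RGB pixel, desaturated red
tf.image.adjust_saturation(image, 2.0)
# ==> [[[0.5, 0., 0.]]]
```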
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{Images to adjust.  At least 3-D.}]>:$images,
    Arg<TF_Float32Tensor, [{A float scale by which to multiply the saturation.}]>:$scale
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32]>, [{The saturation-adjusted image or images.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AllOp : TF_Op<"All", [NoSideEffect]> {
  let summary = [{
Computes the "logical and" of elements across dimensions of a tensor.
  }];

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
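
For example (an illustrative snippet; `tf.reduce_all` is the public Python
wrapper that lowers to this op):

```python
x = tf.constant([[True, True], [False, True]])
tf.reduce_all(x)     # ==> False
tf.reduce_all(x, 0)  # ==> [False, True]
tf.reduce_all(x, 1)  # ==> [True, False]
```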
  }];

  let arguments = (ins
    Arg<TF_BoolTensor, [{The tensor to reduce.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.}]>:$reduction_indices,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    Res<TF_BoolTensor, [{The reduced tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{ return Verify(*this); }];
}

def TF_AllToAllOp : TF_Op<"AllToAll", [NoSideEffect, TF_NoConstantFold]> {
  let summary = "An Op to exchange data across TPU replicas.";

  let description = [{
On each replica, the input is split into `split_count` blocks along
`split_dimension` and sent to the other replicas given `group_assignment`. After
receiving `split_count` - 1 blocks from the other replicas, we concatenate the
blocks along `concat_dimension` as the output.

For example, suppose there are 2 TPU replicas:
replica 0 receives input: `[[A, B]]`
replica 1 receives input: `[[C, D]]`

group_assignment=`[[0, 1]]`
concat_dimension=0
split_dimension=1
split_count=2

replica 0's output: `[[A], [C]]`
replica 1's output: `[[B], [D]]`
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The local input to the sum.}]>:$input,
    Arg<TF_Int32Tensor, [{An int32 tensor with shape
[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
replica ids in the ith subgroup.}]>:$group_assignment,

    I64Attr:$concat_dimension,
    I64Attr:$split_dimension,
    I64Attr:$split_count
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The exchanged result.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AngleOp : TF_Op<"Angle", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Returns the argument of a complex number.";

  let description = [{
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` that is the argument of each element in `input`. All elements in
`input` must be complex numbers of the form \\(a + bj\\), where *a*
is the real part and *b* is the imaginary part.

The argument returned by this operation is of the form \\(atan2(b, a)\\).

For example:

```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.angle(input) ==> [2.0132, 1.056]
```

@compatibility(numpy)
Equivalent to np.angle.
@end_compatibility
  }];

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64]>:$input
  );

  let results = (outs
    TF_F32OrF64Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
}

def TF_AnonymousIteratorOp : TF_Op<"AnonymousIterator", []> {
  let summary = "A container for an iterator resource.";

  let arguments = (ins
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator" or
"IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
resource sharing by name, and does not keep a reference to the resource
container.}], [TF_DatasetIteratorAlloc]>:$handle
  );
}

def TF_AnonymousIteratorV2Op : TF_Op<"AnonymousIteratorV2", []> {
  let summary = "A container for an iterator resource.";

  let arguments = (ins
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator" or
"IteratorGetNext" op. In contrast to Iterator, AnonymousIterator prevents
resource sharing by name, and does not keep a reference to the resource
container.}], [TF_DatasetIteratorAlloc]>:$handle,
    Res<TF_VariantTensor, [{A variant deleter that should be passed into the op that deletes the iterator.}]>:$deleter
  );
}

def TF_AnonymousMemoryCacheOp : TF_Op<"AnonymousMemoryCache", []> {
  let summary = "";

  let arguments = (ins);

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetMemoryCacheAlloc]>:$handle,
    TF_VariantTensor:$deleter
  );
}

def TF_AnonymousMultiDeviceIteratorOp : TF_Op<"AnonymousMultiDeviceIterator", []> {
  let summary = "A container for a multi device iterator resource.";

  let arguments = (ins
    Confined<StrArrayAttr, [ArrayMinCount<1>]>:$devices,
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    Res<TF_ResourceTensor, [{A handle to a multi device iterator that can be passed to a
"MultiDeviceIteratorGetNextFromShard" op. In contrast to MultiDeviceIterator,
AnonymousIterator prevents resource sharing by name, and does not keep a
reference to the resource container.}], [TF_DatasetIteratorAlloc]>:$handle,
    Res<TF_VariantTensor, [{A variant deleter that should be passed into the op that deletes the iterator.}]>:$deleter
  );
}

def TF_AnonymousRandomSeedGeneratorOp : TF_Op<"AnonymousRandomSeedGenerator", []> {
  let summary = "";

  let arguments = (ins
    TF_Int64Tensor:$seed,
    TF_Int64Tensor:$seed2
  );

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorAlloc]>:$handle,
    TF_VariantTensor:$deleter
  );
}

def TF_AnonymousSeedGeneratorOp : TF_Op<"AnonymousSeedGenerator", []> {
  let summary = "";

  let arguments = (ins
    TF_Int64Tensor:$seed,
    TF_Int64Tensor:$seed2,
    TF_BoolTensor:$reshuffle
  );

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorAlloc]>:$handle,
    TF_VariantTensor:$deleter
  );
}

def TF_AnyOp : TF_Op<"Any", [NoSideEffect]> {
  let summary = [{
Computes the "logical or" of elements across dimensions of a tensor.
  }];

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
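
For example (an illustrative snippet; `tf.reduce_any` is the public Python
wrapper that lowers to this op):

```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_any(x)     # ==> True
tf.reduce_any(x, 0)  # ==> [True, True]
tf.reduce_any(x, 1)  # ==> [True, False]
```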
  }];

  let arguments = (ins
    Arg<TF_BoolTensor, [{The tensor to reduce.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
`[-rank(input), rank(input))`.}]>:$reduction_indices,

    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
  );

  let results = (outs
    Res<TF_BoolTensor, [{The reduced tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{ return Verify(*this); }];
}

def TF_ApproximateEqualOp : TF_Op<"ApproximateEqual", [Commutative, NoSideEffect]> {
  let summary = "Returns the truth value of abs(x-y) < tolerance element-wise.";

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y,

    DefaultValuedAttr<F32Attr, "1e-05f">:$tolerance
  );

  let results = (outs
    TF_BoolTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ArgMaxOp : TF_Op<"ArgMax", [NoSideEffect]> {
  let summary = [{
Returns the index with the largest value across dimensions of a tensor.
  }];

  let description = [{
Note that in case of ties the identity of the return value is not guaranteed.

Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmax(input = a)
  c = tf.keras.backend.eval(b)
  # c = 4
  # here a[4] = 166.32 which is the largest element of a across axis 0
  ```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
    Arg<TF_I32OrI64Tensor, [{int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors,
use dimension = 0.}]>:$dimension
  );

  let results = (outs
    TF_I32OrI64Tensor:$output
  );

  TF_DerivedResultTypeAttr output_type = TF_DerivedResultTypeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
}

def TF_ArgMinOp : TF_Op<"ArgMin", [NoSideEffect]> {
  let summary = [{
Returns the index with the smallest value across dimensions of a tensor.
  }];

  let description = [{
Note that in case of ties the identity of the return value is not guaranteed.

Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmin(input = a)
  c = tf.keras.backend.eval(b)
  # c = 0
  # here a[0] = 1 which is the smallest element of a across axis 0
  ```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
    Arg<TF_I32OrI64Tensor, [{int32 or int64, must be in the range `[-rank(input), rank(input))`.
Describes which dimension of the input Tensor to reduce across. For vectors,
use dimension = 0.}]>:$dimension
  );

  let results = (outs
    TF_I32OrI64Tensor:$output
  );

  TF_DerivedResultTypeAttr output_type = TF_DerivedResultTypeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
}

def TF_AsStringOp : TF_Op<"AsString", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Converts each entry in the given tensor to strings.";

  let description = [{
Supports many numeric types and boolean.

For Unicode, see the
[Working with Unicode text](https://www.tensorflow.org/tutorials/representation/unicode)
tutorial.

Examples:

>>> tf.strings.as_string([3, 2])
<tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)>
>>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy()
array([b'3.14', b'2.72'], dtype=object)
  }];

  let arguments = (ins
    TensorOf<[TF_Bool, TF_Complex128, TF_Complex64, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Variant]>:$input,

    DefaultValuedAttr<I64Attr, "-1">:$precision,
    DefaultValuedAttr<BoolAttr, "false">:$scientific,
    DefaultValuedAttr<BoolAttr, "false">:$shortest,
    DefaultValuedAttr<I64Attr, "-1">:$width,
    StrAttr:$fill
  );

  let results = (outs
    TF_StrTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AsinOp : TF_Op<"Asin", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the trigonometric inverse sine of x element-wise.";

  let description = [{
The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that
if `y = tf.math.sin(x)`, then `x = tf.math.asin(y)`.

**Note**: The output of `tf.math.asin` will lie within the invertible range
of sine, i.e., [-pi/2, pi/2].

For example:

```python
# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
x = tf.constant([1.047, 0.785])
y = tf.math.sin(x) # [0.8659266, 0.7068252]

tf.math.asin(y) # [1.047, 0.785] = x
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AsinhOp : TF_Op<"Asinh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes inverse hyperbolic sine of x element-wise.";

  let description = [{
Given an input tensor, this function computes inverse hyperbolic sine
  for every element in the tensor. Both input and output have a range of
  `[-inf, inf]`.

  ```python
  x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")])
  tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf]
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AssertOp : TF_Op<"Assert", []> {
  let summary = "Asserts that the given condition is true.";

  let description = [{
If `condition` evaluates to false, print the list of tensors in `data`.
`summarize` determines how many entries of the tensors to print.
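
For example (an illustrative snippet using the public `tf.debugging.Assert`
wrapper; in graph mode the returned op must be used as a control dependency
for the check to run):

```python
x = tf.constant([2.0, 3.0])
# Fails with InvalidArgumentError if any element of x is <= 0.
tf.debugging.Assert(tf.reduce_all(x > 0), [x], summarize=2)
```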
  }];

  let arguments = (ins
    Arg<TF_BoolTensor, [{The condition to evaluate.}]>:$condition,
    Arg<Variadic<TF_Tensor>, [{The tensors to print out when condition is false.}]>:$data,

    DefaultValuedAttr<I64Attr, "3">:$summarize
  );

  let results = (outs);

  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<1>;

  let hasCanonicalizer = 1;
}

def TF_AssignAddVariableOp : TF_Op<"AssignAddVariableOp", []> {
  let summary = "Adds a value to the current value of a variable.";

  let description = [{
Any ReadVariableOp with a control dependency on this op is guaranteed to
see the incremented value or a subsequent newer one.
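
For example (an illustrative snippet; `tf.Variable.assign_add` is the public
Python API that lowers to this op):

```python
v = tf.Variable(10.0)
v.assign_add(2.0)
v.read_value()  # ==> 12.0
```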
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
    Arg<TF_Tensor, [{the value by which the variable will be incremented.}]>:$value
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_AssignSubVariableOp : TF_Op<"AssignSubVariableOp", []> {
  let summary = "Subtracts a value from the current value of a variable.";

  let description = [{
Any ReadVariableOp with a control dependency on this op is guaranteed to
see the decremented value or a subsequent newer one.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
    Arg<TF_Tensor, [{the value by which the variable will be decremented.}]>:$value
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_AssignVariableOp : TF_Op<"AssignVariableOp", []> {
  let summary = "Assigns a new value to a variable.";

  let description = [{
Any ReadVariableOp with a control dependency on this op is guaranteed to return
this value or a subsequent newer value of the variable.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableWrite]>:$resource,
    Arg<TF_Tensor, [{the value to which the variable will be set.}]>:$value
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_AtanOp : TF_Op<"Atan", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes the trigonometric inverse tangent of x element-wise.";

  let description = [{
The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that
if `y = tf.math.tan(x)`, then `x = tf.math.atan(y)`.

**Note**: The output of `tf.math.atan` will lie within the invertible range
of tan, i.e., (-pi/2, pi/2).

For example:

```python
# Note: [1.047, 0.785] ~= [(pi/3), (pi/4)]
x = tf.constant([1.047, 0.785])
y = tf.math.tan(x) # [1.731261, 0.99920404]

tf.math.atan(y) # [1.047, 0.785] = x
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_Atan2Op : TF_Op<"Atan2", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                 WithBroadcastableBinOpBuilder {
  let summary = [{
Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
  }];

  let description = [{
This is the angle \( \theta \in [-\pi, \pi] \) such that
\[ x = r \cos(\theta) \]
and
\[ y = r \sin(\theta) \]
where \( r = \sqrt{x^2 + y^2} \).
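
For example (an illustrative snippet; `tf.math.atan2` is the public Python
wrapper for this op):

```python
y = tf.constant([1.0, 1.0, -1.0])
x = tf.constant([1.0, -1.0, -1.0])
tf.math.atan2(y, x)  # ==> [0.7853982, 2.3561945, -2.3561945]
```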
  }];

  let arguments = (ins
    TF_FloatTensor:$y,
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AtanhOp : TF_Op<"Atanh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes inverse hyperbolic tangent of x element-wise.";

  let description = [{
Given an input tensor, this function computes inverse hyperbolic tangent
  for every element in the tensor. Input range is `[-1,1]` and output range is
  `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the
  input is `1`, output will be `inf`. Values outside the range will have
  `nan` as output.

  ```python
  x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")])
  tf.math.atanh(x) ==> [nan -inf -0.54930615 inf  0. 0.54930615 nan nan]
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AvgPoolOp : TF_Op<"AvgPool", [NoSideEffect]> {
  let summary = "Performs average pooling on the input.";

  let description = [{
Each entry in `output` is the mean of the corresponding size `ksize`
window in `value`.
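
For example (an illustrative snippet using the public `tf.nn.avg_pool2d`
wrapper; a single 2x2 window is averaged to one value):

```python
x = tf.reshape(tf.constant([1., 2., 3., 4.]), [1, 2, 2, 1])
tf.nn.avg_pool2d(x, ksize=2, strides=2, padding="VALID")
# ==> [[[[2.5]]]]
```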
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{4-D with shape `[batch, height, width, channels]`.}]>:$value,

    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{The average pooled output tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AvgPool3DOp : TF_Op<"AvgPool3D", [NoSideEffect]> {
  let summary = "Performs 3D average pooling on the input.";

  let description = [{
Each entry in `output` is the mean of the corresponding size `ksize` window in
`value`.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{Shape `[batch, depth, rows, cols, channels]` tensor to pool over.}]>:$input,

    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{The average pooled output tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_AvgPool3DGradOp : TF_Op<"AvgPool3DGrad", [NoSideEffect]> {
  let summary = "Computes gradients of average pooling function.";

  let arguments = (ins
    Arg<TF_Int32Tensor, [{The original input dimensions.}]>:$orig_input_shape,
    Arg<TF_FloatTensor, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,

    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{The backprop for input.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_AvgPoolGradOp : TF_Op<"AvgPoolGrad", [NoSideEffect]> {
  let summary = "Computes gradients of the average pooling function.";

  let arguments = (ins
    Arg<TF_Int32Tensor, [{1-D.  Shape of the original input to `avg_pool`.}]>:$orig_input_shape,
    Arg<TF_FloatTensor, [{4-D with shape `[batch, height, width, channels]`.  Gradients w.r.t.
the output of `avg_pool`.}]>:$grad,

    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
  );

  let results = (outs
    Res<TF_FloatTensor, [{4-D.  Gradients w.r.t. the input of `avg_pool`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_BatchMatMulOp : TF_Op<"BatchMatMul", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Multiplies slices of two tensors in batches.";

  let description = [{
Multiplies all slices of `Tensor` `x` and `y` (each slice can be
viewed as an element of a batch), and arranges the individual results
in a single output tensor of the same batch size. Each of the
individual slices can optionally be adjointed (to adjoint a matrix
means to transpose and conjugate it) before multiplication by setting
the `adj_x` or `adj_y` flag to `True`, which are by default `False`.

The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
and `[..., r_y, c_y]`.

The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:

    r_o = c_x if adj_x else r_x
    c_o = r_y if adj_y else c_y

It is computed as:

    output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])
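
For example (an illustrative snippet; `tf.raw_ops.BatchMatMul` exposes this op
directly from Python):

```python
x = tf.ones([2, 2, 3])
y = tf.ones([2, 3, 4])
tf.raw_ops.BatchMatMul(x=x, y=y).shape  # ==> [2, 2, 4]
```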
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_x, c_x]`.}]>:$x,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_y, c_y]`.}]>:$y,

    DefaultValuedAttr<BoolAttr, "false">:$adj_x,
    DefaultValuedAttr<BoolAttr, "false">:$adj_y
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{3-D or higher with shape `[..., r_o, c_o]`}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BatchMatMulV2Op : TF_Op<"BatchMatMulV2", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Multiplies slices of two tensors in batches.";

  let description = [{
Multiplies all slices of `Tensor` `x` and `y` (each slice can be
viewed as an element of a batch), and arranges the individual results
in a single output tensor of the same batch size. Each of the
individual slices can optionally be adjointed (to adjoint a matrix
means to transpose and conjugate it) before multiplication by setting
the `adj_x` or `adj_y` flag to `True`, which are by default `False`.

The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
and `[..., r_y, c_y]`.

The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:

    r_o = c_x if adj_x else r_x
    c_o = r_y if adj_y else c_y

It is computed as:

    output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])

*NOTE*: `BatchMatMulV2` supports broadcasting in the batch dimensions. More
about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
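
For example (an illustrative snippet showing batch-dimension broadcasting;
`tf.matmul` lowers to this op for batched inputs):

```python
x = tf.ones([1, 2, 3])
y = tf.ones([5, 3, 4])
tf.matmul(x, y).shape  # ==> [5, 2, 4]
```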
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_x, c_x]`.}]>:$x,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{2-D or higher with shape `[..., r_y, c_y]`.}]>:$y,

    DefaultValuedAttr<BoolAttr, "false">:$adj_x,
    DefaultValuedAttr<BoolAttr, "false">:$adj_y
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64]>, [{3-D or higher with shape `[..., r_o, c_o]`}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasCanonicalizer = 1;
}

def TF_BatchNormWithGlobalNormalizationOp : TF_Op<"BatchNormWithGlobalNormalization", [NoSideEffect]> {
  let summary = "Batch normalization.";

  let description = [{
This op is deprecated. Prefer `tf.nn.batch_normalization`.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 4D input Tensor.}]>:$t,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D mean Tensor with size matching the last dimension of t.
This is the first output from tf.nn.moments,
or a saved moving average thereof.}]>:$m,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D variance Tensor with size matching the last dimension of t.
This is the second output from tf.nn.moments,
or a saved moving average thereof.}]>:$v,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D beta Tensor with size matching the last dimension of t.
An offset to be added to the normalized tensor.}]>:$beta,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 1D gamma Tensor with size matching the last dimension of t.
If "scale_after_normalization" is true, this tensor will be multiplied
with the normalized tensor.}]>:$gamma,

    F32Attr:$variance_epsilon,
    BoolAttr:$scale_after_normalization
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$result
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BatchToSpaceOp : TF_Op<"BatchToSpace", [NoSideEffect]> {
  let summary = "BatchToSpace for 4-D tensors of type T.";

  let description = [{
This is a legacy version of the more general BatchToSpaceND.

Rearranges (permutes) data from batch into blocks of spatial data, followed by
cropping. This is the reverse transformation of SpaceToBatch. More specifically,
this op outputs a copy of the input tensor where values from the `batch`
dimension are moved in spatial blocks to the `height` and `width` dimensions,
followed by cropping along the `height` and `width` dimensions.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{4-D tensor with shape
`[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
  depth]`. Note that the batch size of the input tensor must be divisible by
`block_size * block_size`.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
how many elements to crop from the intermediate result across the spatial
dimensions as follows:

    crops = [[crop_top, crop_bottom], [crop_left, crop_right]]}]>:$crops,

    Confined<I64Attr, [IntMinValue<2>]>:$block_size
  );

  let results = (outs
    Res<TF_Tensor, [{4-D with shape `[batch, height, width, depth]`, where:

      height = height_pad - crop_top - crop_bottom
      width = width_pad - crop_left - crop_right

The attr `block_size` must be greater than one. It indicates the block size.

Some examples:

(1) For the following input of shape `[4, 1, 1, 1]` and block_size of 2:

```
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

The output tensor has shape `[1, 2, 2, 1]` and value:

```
x = [[[[1], [2]], [[3], [4]]]]
```

(2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:

```
[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
```

The output tensor has shape `[1, 2, 2, 3]` and value:

```
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

(3) For the following input of shape `[4, 2, 2, 1]` and block_size of 2:

```
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

The output tensor has shape `[1, 4, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]],
      [[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```

(4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:

```
x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
```

The output tensor has shape `[2, 2, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]]],
     [[[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasCanonicalizer = 1;
}

def TF_BatchToSpaceNDOp : TF_Op<"BatchToSpaceND", [NoSideEffect]> {
  let summary = "BatchToSpace for N-D tensors of type T.";

  let description = [{
This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
`block_shape + [batch]`, interleaves these blocks back into the grid defined by
the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
the input.  The spatial dimensions of this intermediate result are then
optionally cropped according to `crops` to produce the output.  This is the
reverse of SpaceToBatch.  See below for a precise description.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
where spatial_shape has M dimensions.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{1-D with shape `[M]`, all values must be >= 1.}]>:$block_shape,
    Arg<TF_I32OrI64Tensor, [{2-D with shape `[M, 2]`, all values must be >= 0.
  `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
  dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
  required that
  `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.

This operation is equivalent to the following steps:

1. Reshape `input` to `reshaped` of shape:
     [block_shape[0], ..., block_shape[M-1],
      batch / prod(block_shape),
      input_shape[1], ..., input_shape[N-1]]

2. Permute dimensions of `reshaped` to produce `permuted` of shape
     [batch / prod(block_shape),

      input_shape[1], block_shape[0],
      ...,
      input_shape[M], block_shape[M-1],

      input_shape[M+1], ..., input_shape[N-1]]

3. Reshape `permuted` to produce `reshaped_permuted` of shape
     [batch / prod(block_shape),

      input_shape[1] * block_shape[0],
      ...,
      input_shape[M] * block_shape[M-1],

      input_shape[M+1],
      ...,
      input_shape[N-1]]

4. Crop the start and end of dimensions `[1, ..., M]` of
   `reshaped_permuted` according to `crops` to produce the output of shape:
     [batch / prod(block_shape),

      input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
      ...,
      input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],

      input_shape[M+1], ..., input_shape[N-1]]

Some examples:

(1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [0, 0]]`:

```
[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
```

The output tensor has shape `[1, 2, 2, 1]` and value:

```
x = [[[[1], [2]], [[3], [4]]]]
```

(2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [0, 0]]`:

```
[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
```

The output tensor has shape `[1, 2, 2, 3]` and value:

```
x = [[[[1, 2, 3], [4, 5, 6]],
      [[7, 8, 9], [10, 11, 12]]]]
```

(3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [0, 0]]`:

```
x = [[[[1], [3]], [[9], [11]]],
     [[[2], [4]], [[10], [12]]],
     [[[5], [7]], [[13], [15]]],
     [[[6], [8]], [[14], [16]]]]
```

The output tensor has shape `[1, 4, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]],
      [[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```

(4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
    `crops = [[0, 0], [2, 0]]`:

```
x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
     [[[0], [2], [4]]], [[[0], [10], [12]]],
     [[[0], [5], [7]]], [[[0], [13], [15]]],
     [[[0], [6], [8]]], [[[0], [14], [16]]]]
```

The output tensor has shape `[2, 2, 4, 1]` and value:

```
x = [[[[1],   [2],  [3],  [4]],
      [[5],   [6],  [7],  [8]]],
     [[[9],  [10], [11],  [12]],
      [[13], [14], [15],  [16]]]]
```}]>:$crops
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tcrops = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr Tblock_shape = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BetaincOp : TF_Op<"Betainc", [NoSideEffect]> {
  let summary = [{
Compute the regularized incomplete beta integral \\(I_x(a, b)\\).
  }];

  let description = [{
The regularized incomplete beta integral is defined as:


\\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)

where


\\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)


is the incomplete beta function and \\(B(a, b)\\) is the *complete*
beta function.
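
For example (an illustrative snippet; `tf.math.betainc` is the public Python
wrapper, and \\(I_{0.5}(1, 1) = 0.5\\) and \\(I_{0.5}(2, 2) = 0.5\\) by symmetry):

```python
a = tf.constant([1.0, 2.0])
b = tf.constant([1.0, 2.0])
x = tf.constant([0.5, 0.5])
tf.math.betainc(a, b, x)  # ==> [0.5, 0.5]
```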
  }];

  let arguments = (ins
    TF_F32OrF64Tensor:$a,
    TF_F32OrF64Tensor:$b,
    TF_F32OrF64Tensor:$x
  );

  let results = (outs
    TF_F32OrF64Tensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BiasAddOp : TF_Op<"BiasAdd", [NoSideEffect, TF_ContractionFusableInterface, TF_LayoutSensitiveInterface]> {
  let summary = "Adds `bias` to `value`.";

  let description = [{
This is a special case of `tf.add` where `bias` is restricted to be 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
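
For example (an illustrative snippet; `tf.nn.bias_add` is the public Python
wrapper, broadcasting the bias over the last dimension):

```python
value = tf.constant([[1.0, 2.0], [3.0, 4.0]])
bias = tf.constant([0.5, -0.5])
tf.nn.bias_add(value, bias)  # ==> [[1.5, 1.5], [3.5, 3.5]]
```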
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$value,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the last dimension of `value`.}]>:$bias,

    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Broadcasted sum of `value` and `bias`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let extraClassDeclaration = [{
    // TF_ContractionFusableInterface:
    Optional<ContractionFusion> GetContractionFusion();
    // TF_LayoutSensitiveInterface:
    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
    StringRef GetOptimalLayout(const RuntimeDevices& devices);
    LogicalResult UpdateDataFormat(StringRef data_format);
  }];

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BiasAddGradOp : TF_Op<"BiasAddGrad", [NoSideEffect]> {
  let summary = [{
The backward operation for "BiasAdd" on the "bias" tensor.
  }];

  let description = [{
It accumulates all the values from out_backprop into the feature dimension.
For NHWC data format, the feature dimension is the last. For NCHW data format,
the feature dimension is the third-to-last.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$out_backprop,

    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the feature dimension of `out_backprop`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_BiasAddV1Op : TF_Op<"BiasAddV1", [NoSideEffect]> {
  let summary = "Adds `bias` to `value`.";

  let description = [{
This is a deprecated version of BiasAdd and will soon be removed.

This is a special case of `tf.add` where `bias` is restricted to be 1-D.
Broadcasting is supported, so `value` may have any number of dimensions.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Any number of dimensions.}]>:$value,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{1-D with size the last dimension of `value`.}]>:$bias
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Broadcasted sum of `value` and `bias`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_BincountOp : TF_Op<"Bincount", [NoSideEffect]> {
  let summary = [{
Counts the number of occurrences of each value in an integer array.
  }];

  let description = [{
Outputs a vector with length `size` and the same dtype as `weights`. If
`weights` are empty, then index `i` stores the number of times the value `i` is
counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
the value in `weights` at each index where the corresponding value in `arr` is
`i`.

Values in `arr` outside of the range [0, size) are ignored.
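
For example (an illustrative snippet; `tf.math.bincount` is the public Python
wrapper for this op):

```python
arr = tf.constant([1, 1, 2, 3])
tf.math.bincount(arr)  # ==> [0, 2, 1, 1]
weights = tf.constant([0.5, 0.5, 1.0, 2.0])
tf.math.bincount(arr, weights=weights)  # ==> [0., 1., 1., 2.]
```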
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{int32 `Tensor`.}]>:$arr,
    Arg<TF_Int32Tensor, [{non-negative int32 scalar `Tensor`.}]>:$size,
    Arg<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{is an int32, int64, float32, or float64 `Tensor` with the same
shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
equal to 1.}]>:$weights
  );

  let results = (outs
    Res<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{1D `Tensor` with length equal to `size`. The counts or summed weights for
each value in the range [0, size).}]>:$bins
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
}

def TF_BitcastOp : TF_Op<"Bitcast", [NoSideEffect]> {
  let summary = [{
Bitcasts a tensor from one type to another without copying data.
  }];

  let description = [{
Given a tensor `input`, this operation returns a tensor that has the same buffer
data as `input` with datatype `type`.

If the input datatype `T` is larger than the output datatype `type` then the
shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].

If `T` is smaller than `type`, the operator requires that the rightmost
dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
[..., sizeof(`type`)/sizeof(`T`)] to [...].

tf.bitcast() and tf.cast() work differently when a real dtype is cast to a
complex dtype (e.g. tf.complex64 or tf.complex128): tf.cast() sets the imaginary
part to 0, while tf.bitcast() raises an InvalidArgumentError.
For example,

Example 1:

>>> a = [1., 2., 3.]
>>> equality_bitcast = tf.bitcast(a, tf.complex128)
Traceback (most recent call last):
...
InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
>>> equality_cast = tf.cast(a, tf.complex128)
>>> print(equality_cast)
tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)

Example 2:

>>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
<tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>

Example 3:

>>> x = [1., 2., 3.]
>>> y = [0., 2., 3.]
>>> equality = tf.equal(x, y)
>>> equality_cast = tf.cast(equality, tf.float32)
>>> equality_bitcast = tf.bitcast(equality_cast, tf.uint8)
>>> print(equality)
tf.Tensor([False True True], shape=(3,), dtype=bool)
>>> print(equality_cast)
tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
>>> print(equality_bitcast)
tf.Tensor(
    [[  0   0   0   0]
     [  0   0 128  63]
     [  0   0 128  63]], shape=(3, 4), dtype=uint8)

*NOTE*: Bitcast is implemented as a low-level cast, so machines with different
endian orderings will give different results.
  }];

  let arguments = (ins
    TF_NumberTensor:$input
  );

  let results = (outs
    TF_NumberTensor:$output
  );

  TF_DerivedResultTypeAttr type = TF_DerivedResultTypeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_BitwiseAndOp : TF_Op<"BitwiseAnd", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
                      WithBroadcastableBinOpBuilder {
  let summary = "Elementwise computes the bitwise AND of `x` and `y`.";

  let description = [{
The result will have those bits set that are set in both `x` and `y`. The
computation is performed on the underlying representations of `x` and `y`.

For example:

```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]

for dtype in dtype_list:
  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
  exp = tf.constant([0, 0, 3, 10], dtype=tf.float32)

  res = bitwise_ops.bitwise_and(lhs, rhs)
  tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE
```
  }];

  let arguments = (ins
    TF_IntTensor:$x,
    TF_IntTensor:$y
  );

  let results = (outs
    TF_IntTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BitwiseOrOp : TF_Op<"BitwiseOr", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
                     WithBroadcastableBinOpBuilder {
  let summary = "Elementwise computes the bitwise OR of `x` and `y`.";

  let description = [{
The result will have those bits set that are set in `x`, `y`, or both. The
computation is performed on the underlying representations of `x` and `y`.

For example:

```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]

for dtype in dtype_list:
  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
  exp = tf.constant([5, 5, 7, 15], dtype=tf.float32)

  res = bitwise_ops.bitwise_or(lhs, rhs)
  tf.assert_equal(tf.cast(res, tf.float32), exp)  # TRUE
```
  }];

  let arguments = (ins
    TF_IntTensor:$x,
    TF_IntTensor:$y
  );

  let results = (outs
    TF_IntTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BitwiseXorOp : TF_Op<"BitwiseXor", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
                      WithBroadcastableBinOpBuilder {
  let summary = "Elementwise computes the bitwise XOR of `x` and `y`.";

  let description = [{
The result will have those bits set that differ between `x` and `y`. The
computation is performed on the underlying representations of `x` and `y`.

For example:

```python
import tensorflow as tf
from tensorflow.python.ops import bitwise_ops
dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64,
              tf.uint8, tf.uint16, tf.uint32, tf.uint64]

for dtype in dtype_list:
  lhs = tf.constant([0, 5, 3, 14], dtype=dtype)
  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
  exp = tf.constant([5, 5, 4, 5], dtype=tf.float32)

  res = bitwise_ops.bitwise_xor(lhs, rhs)
  tf.assert_equal(tf.cast(res, tf.float32), exp)  # TRUE
```
  }];

  let arguments = (ins
    TF_IntTensor:$x,
    TF_IntTensor:$y
  );

  let results = (outs
    TF_IntTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BoostedTreesBucketizeOp : TF_Op<"BoostedTreesBucketize", [NoSideEffect, SameVariadicOperandSize]> {
  let summary = "Bucketize each feature based on bucket boundaries.";

  let description = [{
An op that returns a list of int32 tensors, where each tensor represents the
bucketized values for a single feature.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Float32Tensor>, [{float; List of Rank 1 Tensors, each containing float values for a single feature.}]>:$float_values,
    Arg<Variadic<TF_Float32Tensor>, [{float; List of Rank 1 Tensors, each containing the bucket boundaries for a single
feature.}]>:$bucket_boundaries
  );

  let results = (outs
    Res<Variadic<TF_Int32Tensor>, [{int; List of Rank 1 Tensors, each containing the bucketized values for a single feature.}]>:$buckets
  );

  TF_DerivedOperandSizeAttr num_features = TF_DerivedOperandSizeAttr<0>;
}

def TF_BroadcastArgsOp : TF_Op<"BroadcastArgs", [NoSideEffect]> {
  let summary = "Return the shape of s0 op s1 with broadcast.";

  let description = [{
Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.
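
For example, broadcasting shape `[8, 1, 6, 1]` with `[7, 1, 5]` yields
`[8, 7, 6, 5]` (an illustrative doctest via `tf.raw_ops.BroadcastArgs`;
the printed form may differ slightly across versions):

>>> s0 = tf.constant([8, 1, 6, 1], dtype=tf.int32)
>>> s1 = tf.constant([7, 1, 5], dtype=tf.int32)
>>> tf.raw_ops.BroadcastArgs(s0=s0, s1=s1)
<tf.Tensor: shape=(4,), dtype=int32, numpy=array([8, 7, 6, 5], dtype=int32)>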
  }];

  let arguments = (ins
    TF_I32OrI64Tensor:$s0,
    TF_I32OrI64Tensor:$s1
  );

  let results = (outs
    TF_I32OrI64Tensor:$r0
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_BroadcastGradientArgsOp : TF_Op<"BroadcastGradientArgs", [NoSideEffect, SameOperandsAndResultElementType, TF_OperandHasRank<0, 1>, TF_OperandHasRank<1, 1>, TF_ResultHasRank<0, 1>, TF_ResultHasRank<1, 1>]> {
  let summary = [{
Return the reduction indices for computing gradients of s0 op s1 with broadcast.
  }];

  let description = [{
This is typically used by gradient computations for a broadcasting operation.
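
For example, with `s0 = [2, 3, 5]` and `s1 = [1]`, every axis of `s1` is
broadcast, so the reduction indices are `r0 = []` and `r1 = [0, 1, 2]` (a
hypothetical sketch via `tf.raw_ops.BroadcastGradientArgs`; output shown
schematically):

>>> r0, r1 = tf.raw_ops.BroadcastGradientArgs(
...     s0=tf.constant([2, 3, 5]), s1=tf.constant([1]))
>>> print(r0.numpy(), r1.numpy())
[] [0 1 2]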
  }];

  let arguments = (ins
    TF_I32OrI64Tensor:$s0,
    TF_I32OrI64Tensor:$s1
  );

  let results = (outs
    TF_I32OrI64Tensor:$r0,
    TF_I32OrI64Tensor:$r1
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasFolder = 1;
}

def TF_BroadcastToOp : TF_Op<"BroadcastTo", [NoSideEffect]> {
  let summary = "Broadcast an array for a compatible shape.";

  let description = [{
Broadcasting is the process of making arrays have compatible shapes
for arithmetic operations. Two shapes are compatible if, for each
dimension pair, they are either equal or one of them is one. When trying
to broadcast a Tensor to a shape, it starts with the trailing dimensions
and works its way forward.

For example,

>>> x = tf.constant([1, 2, 3])
>>> y = tf.broadcast_to(x, [3, 3])
>>> print(y)
tf.Tensor(
    [[1 2 3]
     [1 2 3]
     [1 2 3]], shape=(3, 3), dtype=int32)

In the above example, the input Tensor of shape `[3]` (treated as `[1, 3]`)
is broadcast to an output Tensor of shape `[3, 3]`.

When doing broadcasted operations such as multiplying a tensor
by a scalar, broadcasting (usually) confers some time or space
benefit, as the broadcasted tensor is never materialized.

However, `broadcast_to` does not carry with it any such benefits.
The newly-created tensor takes the full memory of the broadcasted
shape. (In a graph context, `broadcast_to` might be fused into a
subsequent operation and then optimized away, however.)
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A Tensor to broadcast.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{A 1-D `int` Tensor. The shape of the desired output.}]>:$shape
  );

  let results = (outs
    Res<TF_Tensor, [{A Tensor.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];
  let hasFolder = 1;
}

def TF_BucketizeOp : TF_Op<"Bucketize", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Bucketizes 'input' based on 'boundaries'.";

  let description = [{
For example, if the inputs are
    boundaries = [0, 10, 100]
    input = [[-5, 10000]
             [150,   10]
             [5,    100]]

then the output will be
    output = [[0, 3]
              [3, 2]
              [1, 3]]
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A Tensor of any shape containing int or float values.}]>:$input,

    F32ArrayAttr:$boundaries
  );

  let results = (outs
    Res<TF_Int32Tensor, [{Same shape as 'input', with each value of input replaced by its bucket index.

@compatibility(numpy)
Equivalent to np.digitize.
@end_compatibility}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CastOp : TF_Op<"Cast", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Cast x of type SrcT to y of DstT.";

  let arguments = (ins
    TF_Tensor:$x,

    DefaultValuedAttr<BoolAttr, "false">:$Truncate
  );

  let results = (outs
    TF_Tensor:$y
  );

  TF_DerivedOperandTypeAttr SrcT = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr DstT = TF_DerivedResultTypeAttr<0>;

  let hasFolder = 1;
}

def TF_CeilOp : TF_Op<"Ceil", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Returns element-wise smallest integer not less than x.";

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CheckNumericsOp : TF_Op<"CheckNumerics", [TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Checks a tensor for NaN and Inf values.";

  let description = [{
When run, reports an `InvalidArgument` error if `tensor` has any values
that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
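
For example (an illustrative sketch via the public
`tf.debugging.check_numerics` wrapper; the error text is abbreviated and may
differ across versions):

>>> t = tf.constant([1.0, float("nan"), 3.0])
>>> tf.debugging.check_numerics(t, message="found bad value")
Traceback (most recent call last):
...
InvalidArgumentError: ... found bad value ... Tensor had NaN values ...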
  }];

  let arguments = (ins
    TF_FloatTensor:$tensor,

    StrAttr:$message
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CholeskyOp : TF_Op<"Cholesky", [NoSideEffect]> {
  let summary = [{
Computes the Cholesky decomposition of one or more square matrices.
  }];

  let description = [{
The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
form square matrices.

The input has to be symmetric and positive definite. Only the lower-triangular
part of the input will be used for this operation. The upper-triangular part
will not be read.

The output is a tensor of the same shape as the input
containing the Cholesky decompositions for all input submatrices `[..., :, :]`.

**Note**: The gradient computation on GPU is faster for large matrices but
not for large batch dimensions when the submatrices are small. In this
case it might be faster to use the CPU.
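
For example, a small hand-checkable case via the public `tf.linalg.cholesky`
wrapper (printed formatting may vary):

>>> x = tf.constant([[4.0, 2.0], [2.0, 2.0]])
>>> tf.linalg.cholesky(x)
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[2., 0.],
       [1., 1.]], dtype=float32)>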
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$input
  );

  let results = (outs
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ClipByValueOp : TF_Op<"ClipByValue", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Clips tensor values to a specified min and max.";

  let description = [{
Given a tensor `t`, this operation returns a tensor of the same type and
shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
Any values less than `clip_value_min` are set to `clip_value_min`. Any values
greater than `clip_value_max` are set to `clip_value_max`.
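
For example (via the public `tf.clip_by_value` wrapper):

>>> t = tf.constant([[-10.0, -1.0, 0.0], [0.0, 2.0, 10.0]])
>>> tf.clip_by_value(t, clip_value_min=-1.0, clip_value_max=1.0)
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=
array([[-1., -1.,  0.],
       [ 0.,  1.,  1.]], dtype=float32)>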
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`.}]>:$t,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
as `t`. The minimum value to clip by.}]>:$clip_value_min,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
as `t`. The maximum value to clip by.}]>:$clip_value_max
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A clipped `Tensor` with the same shape as input 't'.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CollectiveBcastRecvOp : TF_Op<"CollectiveBcastRecv", []> {
  let summary = "Receives a tensor value broadcast from another device.";

  let arguments = (ins
    I64Attr:$group_size,
    I64Attr:$group_key,
    I64Attr:$instance_key,
    TF_ShapeAttr:$shape,
    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
  );

  let results = (outs
    TensorOf<[TF_Bool, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
  );

  TF_DerivedResultTypeAttr T = TF_DerivedResultTypeAttr<0>;
}

def TF_CollectiveBcastSendOp : TF_Op<"CollectiveBcastSend", []> {
  let summary = "Broadcasts a tensor value to one or more other devices.";

  let arguments = (ins
    TensorOf<[TF_Bool, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,

    I64Attr:$group_size,
    I64Attr:$group_key,
    I64Attr:$instance_key,
    TF_ShapeAttr:$shape,
    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
  );

  let results = (outs
    TensorOf<[TF_Bool, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CollectiveGatherOp : TF_Op<"CollectiveGather", []> {
  let summary = [{
Mutually accumulates multiple tensors of identical type and shape.
  }];

  let arguments = (ins
    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,

    I64Attr:$group_size,
    I64Attr:$group_key,
    I64Attr:$instance_key,
    TF_ShapeAttr:$shape,
    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
  );

  let results = (outs
    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CollectiveReduceOp : TF_Op<"CollectiveReduce", [TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Mutually reduces multiple tensors of identical type and shape.
  }];

  let arguments = (ins
    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,

    I64Attr:$group_size,
    I64Attr:$group_key,
    I64Attr:$instance_key,
    TF_AnyStrAttrOf<["Min", "Max", "Mul", "Add"]>:$merge_op,
    TF_AnyStrAttrOf<["Id", "Div"]>:$final_op,
    I64ArrayAttr:$subdiv_offsets,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$wait_for,
    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
  );

  let results = (outs
    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CollectiveReduceV2Op : TF_Op<"CollectiveReduceV2", []> {
  let summary = [{
Mutually reduces multiple tensors of identical type and shape.
  }];

  let arguments = (ins
    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$input,
    TF_Int32Tensor:$group_size,
    TF_Int32Tensor:$group_key,
    TF_Int32Tensor:$instance_key,
    Variadic<TF_ResourceTensor>:$ordering_token,

    TF_AnyStrAttrOf<["Min", "Max", "Mul", "Add"]>:$merge_op,
    TF_AnyStrAttrOf<["Id", "Div"]>:$final_op,
    DefaultValuedAttr<StrAttr, "auto">:$communication_hint,
    DefaultValuedAttr<F32Attr, "0.0f">:$timeout_seconds
  );

  let results = (outs
    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$data
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandSizeAttr Nordering_token = TF_DerivedOperandSizeAttr<4>;
}

def TF_ComplexOp : TF_Op<"Complex", [NoSideEffect, ResultsBroadcastableShape]> {
  let summary = "Converts two real numbers to a complex number.";

  let description = [{
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.

The input tensors `real` and `imag` must have the same shape.

For example:

```
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [2.25 + 4.75j, 3.25 + 5.75j]
```
  }];

  let arguments = (ins
    TF_F32OrF64Tensor:$real,
    TF_F32OrF64Tensor:$imag
  );

  let results = (outs
    TensorOf<[TF_Complex128, TF_Complex64]>:$out
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
}

def TF_ComplexAbsOp : TF_Op<"ComplexAbs", [NoSideEffect, SameOperandsAndResultShape]> {
  let summary = "Computes the complex absolute value of a tensor.";

  let description = [{
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float` or `double` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
value is computed as \\( \sqrt{a^2 + b^2}\\).
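
For example (an illustrative sketch via the public `tf.abs` wrapper, which
dispatches to ComplexAbs for complex inputs):

>>> x = tf.complex(3.0, 4.0)
>>> tf.abs(x)
<tf.Tensor: shape=(), dtype=float32, numpy=5.0>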
  }];

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64]>:$x
  );

  let results = (outs
    TF_F32OrF64Tensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
}

def TF_ConcatOp : TF_Op<"Concat", [NoSideEffect]> {
  let summary = "Concatenates tensors along one dimension.";

  let arguments = (ins
    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to concatenate.  Must be in the
range [0, rank(values)).}]>:$concat_dim,
    Arg<Variadic<TF_Tensor>, [{The `N` Tensors to concatenate. Their ranks and types must match,
and their sizes must match in all dimensions except `concat_dim`.}]>:$values
  );

  let results = (outs
    Res<TF_Tensor, [{A `Tensor` with the concatenation of values stacked along the
`concat_dim` dimension.  This tensor's shape matches that of `values` except
in `concat_dim` where it has the sum of the sizes.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasCanonicalizer = 1;
}

def TF_ConcatOffsetOp : TF_Op<"ConcatOffset", [NoSideEffect]> {
  let summary = "Computes offsets of concat inputs within its output.";

  let description = [{
For example:

```
# 'x' is [2, 2, 7]
# 'y' is [2, 3, 7]
# 'z' is [2, 5, 7]
concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
```

This is typically used by gradient computations for a concat operation.
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{The dimension along which to concatenate.}]>:$concat_dim,
    Arg<Variadic<TF_Int32Tensor>, [{The `N` int32 vectors representing shape of tensors being concatenated.}]>:$shape
  );

  let results = (outs
    Res<Variadic<TF_Int32Tensor>, [{The `N` int32 vectors representing the starting offset
of input tensors within the concatenated output.}]>:$offset
  );

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasFolder = 1;
}

def TF_ConcatV2Op : TF_Op<"ConcatV2", [NoSideEffect]> {
  let summary = "Concatenates tensors along one dimension.";

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{List of `N` Tensors to concatenate. Their ranks and types must match,
and their sizes must match in all dimensions except `concat_dim`.}]>:$values,
    Arg<TF_I32OrI64Tensor, [{0-D.  The dimension along which to concatenate.  Must be in the
range [-rank(values), rank(values)).}]>:$axis
  );

  let results = (outs
    Res<TF_Tensor, [{A `Tensor` with the concatenation of values stacked along the
`concat_dim` dimension.  This tensor's shape matches that of `values` except
in `concat_dim` where it has the sum of the sizes.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasCanonicalizer = 1;
}

def TF_ConfigureDistributedTPUOp : TF_Op<"ConfigureDistributedTPU", []> {
  let summary = [{
Sets up the centralized structures for a distributed TPU system.
  }];

  let arguments = (ins
    StrAttr:$embedding_config,
    StrAttr:$tpu_embedding_config,
    DefaultValuedAttr<BoolAttr, "false">:$is_global_init,
    DefaultValuedAttr<BoolAttr, "false">:$enable_whole_mesh_compilations,
    DefaultValuedAttr<BoolAttr, "true">:$compilation_failure_closes_chips
  );

  let results = (outs
    Res<TF_StrTensor, [{A serialized tensorflow.tpu.TopologyProto that describes the TPU
topology.}]>:$topology
  );
}

def TF_ConfigureTPUEmbeddingOp : TF_Op<"ConfigureTPUEmbedding", []> {
  let summary = "Sets up TPUEmbedding in a distributed TPU system.";

  let arguments = (ins
    StrAttr:$config
  );

  let results = (outs);
}

def TF_ConjOp : TF_Op<"Conj", [Involution, NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Returns the complex conjugate of a complex number.";

  let description = [{
Given a tensor `input` of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in `input`. The
complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
real part and *b* is the imaginary part.

The complex conjugate returned by this operation is of the form \\(a - bj\\).

For example:

```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
```
  }];

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64, TF_Variant]>:$input
  );

  let results = (outs
    TensorOf<[TF_Complex128, TF_Complex64, TF_Variant]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ConjugateTransposeOp : TF_Op<"ConjugateTranspose", [NoSideEffect]> {
  let summary = [{
Shuffle dimensions of x according to a permutation and conjugate the result.
  }];

  let description = [{
The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
  `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
  `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`
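
For example (an illustrative doctest via `tf.raw_ops.ConjugateTranspose`;
printed formatting may vary):

>>> x = tf.constant([[1 + 1j, 2 + 2j], [3 + 3j, 4 + 4j]])
>>> tf.raw_ops.ConjugateTranspose(x=x, perm=[1, 0])
<tf.Tensor: shape=(2, 2), dtype=complex128, numpy=
array([[1.-1.j, 3.-3.j],
       [2.-2.j, 4.-4.j]])>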
  }];

  let arguments = (ins
    TF_Tensor:$x,
    TF_I32OrI64Tensor:$perm
  );

  let results = (outs
    TF_Tensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tperm = TF_DerivedOperandTypeAttr<1>;
}

def TF_Conv2DOp : TF_Op<"Conv2D", [DeclareOpInterfaceMethods<InferTypeOpInterface>, NoSideEffect, TF_LayoutSensitiveInterface]> {
  let summary = [{
Computes a 2-D convolution given 4-D `input` and `filter` tensors.
  }];

  let description = [{
Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
and a filter / kernel tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`, this op
performs the following:

1. Flattens the filter to a 2-D matrix with shape
   `[filter_height * filter_width * in_channels, output_channels]`.
2. Extracts image patches from the input tensor to form a *virtual*
   tensor of shape `[batch, out_height, out_width,
   filter_height * filter_width * in_channels]`.
3. For each patch, right-multiplies the filter matrix and the image patch
   vector.

In detail, with the default NHWC format,

    output[b, i, j, k] =
        sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] *
                        filter[di, dj, q, k]

Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
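
A minimal sketch using the public `tf.nn.conv2d` wrapper, with values chosen
so the result is easy to check by hand:

```python
import tensorflow as tf

# A 1x3x3x1 input of ones convolved with a 2x2x1x1 filter of ones:
# every VALID 2x2 patch sums four ones, so each output element is 4.
x = tf.ones([1, 3, 3, 1])
filt = tf.ones([2, 2, 1, 1])
y = tf.nn.conv2d(x, filt, strides=[1, 1, 1, 1], padding="VALID")
print(y.shape)        # (1, 2, 2, 1)
print(y[0, :, :, 0])  # values are all 4.0
```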
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor. The dimension order is interpreted according to the value
of `data_format`, see below for details.}]>:$input,
    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor of shape
`[filter_height, filter_width, in_channels, out_channels]`}]>:$filter,

    I64ArrayAttr:$strides,
    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{A 4-D tensor. The dimension order is determined by the value of
`data_format`, see below for details.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let extraClassDeclaration = [{
    // TF_LayoutSensitiveInterface:
    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
    StringRef GetOptimalLayout(const RuntimeDevices& devices);
    LogicalResult UpdateDataFormat(StringRef data_format);
    // InferTypeOpInterface:
    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
      return ArraysAreCastCompatible(l, r);
    }
  }];
}

def TF_Conv2DBackpropFilterOp : TF_Op<"Conv2DBackpropFilter", [NoSideEffect, TF_LayoutSensitiveInterface]> {
  let summary = [{
Computes the gradients of convolution with respect to the filter.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{4-D with shape `[batch, in_height, in_width, in_channels]`.}]>:$input,
    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, out_channels]` tensor.}]>:$filter_sizes,
    Arg<TF_FloatTensor, [{4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.}]>:$out_backprop,

    I64ArrayAttr:$strides,
    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    Res<TF_FloatTensor, [{4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
the `filter` input of the convolution.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let extraClassDeclaration = [{
    // TF_LayoutSensitiveInterface:
    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0, 2}; }
    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {}; }
    StringRef GetOptimalLayout(const RuntimeDevices& devices);
    LogicalResult UpdateDataFormat(StringRef data_format);
  }];
}

def TF_Conv2DBackpropInputOp : TF_Op<"Conv2DBackpropInput", [NoSideEffect, TF_LayoutSensitiveInterface]> {
  let summary = [{
Computes the gradients of convolution with respect to the input.
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{An integer vector representing the shape of `input`,
where `input` is a 4-D `[batch, height, width, channels]` tensor.}]>:$input_sizes,
    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.}]>:$filter,
    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.}]>:$out_backprop,

    I64ArrayAttr:$strides,
    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32]>, [{4-D with shape `[batch, in_height, in_width, in_channels]`.  Gradient
w.r.t. the input of the convolution.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];

  let extraClassDeclaration = [{
    // TF_LayoutSensitiveInterface:
    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {2}; }
    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
    StringRef GetOptimalLayout(const RuntimeDevices& devices);
    LogicalResult UpdateDataFormat(StringRef data_format);
  }];
}

def TF_Conv3DOp : TF_Op<"Conv3D", [DeclareOpInterfaceMethods<InferTypeOpInterface>, NoSideEffect]> {
  let summary = [{
Computes a 3-D convolution given 5-D `input` and `filter` tensors.
  }];

  let description = [{
In signal processing, cross-correlation is a measure of similarity of
two waveforms as a function of a time-lag applied to one of them. This
is also known as a sliding dot product or sliding inner-product.

Our Conv3D implements a form of cross-correlation.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{Shape `[batch, in_depth, in_height, in_width, in_channels]`.}]>:$input,
    Arg<TF_FloatTensor, [{Shape `[filter_depth, filter_height, filter_width, in_channels,
out_channels]`. `in_channels` must match between `input` and `filter`.}]>:$filter,

    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let extraClassDeclaration = [{
    // InferTypeOpInterface:
    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
      return ArraysAreCastCompatible(l, r);
    }
  }];
}

def TF_Conv3DBackpropFilterV2Op : TF_Op<"Conv3DBackpropFilterV2", [NoSideEffect]> {
  let summary = [{
Computes the gradients of 3-D convolution with respect to the filter.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{Shape `[batch, depth, rows, cols, in_channels]`.}]>:$input,
    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
where `filter` is a 5-D
`[filter_depth, filter_height, filter_width, in_channels, out_channels]`
tensor.}]>:$filter_sizes,
    Arg<TF_FloatTensor, [{Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
out_channels]`.}]>:$out_backprop,

    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_Conv3DBackpropInputV2Op : TF_Op<"Conv3DBackpropInputV2", [NoSideEffect]> {
  let summary = [{
Computes the gradients of 3-D convolution with respect to the input.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{An integer vector representing the tensor shape of `input`,
where `input` is a 5-D
`[batch, depth, rows, cols, in_channels]` tensor.}]>:$input_sizes,
    Arg<TF_FloatTensor, [{Shape `[depth, rows, cols, in_channels, out_channels]`.
`in_channels` must match between `input` and `filter`.}]>:$filter,
    Arg<TF_FloatTensor, [{Backprop signal of shape `[batch, out_depth, out_rows, out_cols,
out_channels]`.}]>:$out_backprop,

    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
}

def TF_CosOp : TF_Op<"Cos", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes cos of x element-wise.";

  let description = [{
Given an input tensor, this function computes the cosine of every
  element in the tensor. The input range is `(-inf, inf)` and the
  output range is `[-1, 1]`. Inputs outside this range (i.e. `-inf`
  or `inf`) return `nan`.

  ```python
  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
  tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 nan]
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CoshOp : TF_Op<"Cosh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes hyperbolic cosine of x element-wise.";

  let description = [{
Given an input tensor, this function computes hyperbolic cosine of every
  element in the tensor. Input range is `[-inf, inf]` and output range
  is `[1, inf]`.

  ```python
  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
  tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 3.7621956e+00 1.1013233e+04 inf]
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CrossOp : TF_Op<"Cross", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Compute the pairwise cross product.";

  let description = [{
`a` and `b` must be the same shape; they can either be simple 3-element vectors,
or any shape where the innermost dimension is 3. In the latter case, each pair
of corresponding 3-element vectors is cross-multiplied independently.
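
For example (via the public `tf.linalg.cross` wrapper):

>>> a = tf.constant([1., 0., 0.])
>>> b = tf.constant([0., 1., 0.])
>>> tf.linalg.cross(a, b)
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([0., 0., 1.], dtype=float32)>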
  }];

  let arguments = (ins
    Arg<TF_IntOrFpTensor, [{A tensor containing 3-element vectors.}]>:$a,
    Arg<TF_IntOrFpTensor, [{Another tensor, of same type and shape as `a`.}]>:$b
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{Pairwise cross product of the vectors in `a` and `b`.}]>:$product
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CrossReplicaSumOp : TF_Op<"CrossReplicaSum", [NoSideEffect, TF_AllTypesMatch<["input", "output"]>, TF_NoConstantFold]> {
  let summary = "An Op to sum inputs across replicated TPU instances.";

  let description = [{
Each instance supplies its own input.

For example, suppose there are 8 TPU instances: `[A, B, C, D, E, F, G, H]`.
Passing group_assignment=`[[0,2,4,6],[1,3,5,7]]` sets `A, C, E, G` as group 0,
and `B, D, F, H` as group 1. Thus we get the outputs:
`[A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H, A+C+E+G, B+D+F+H]`.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Int32, TF_Uint32]>, [{The local input to the sum.}]>:$input,
    Arg<TF_Int32Tensor, [{An int32 tensor with shape
[num_groups, num_replicas_per_group]. `group_assignment[i]` represents the
replica ids in the ith subgroup.}]>:$group_assignment
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Int32, TF_Uint32]>, [{The sum of all the distributed inputs.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_CumprodOp : TF_Op<"Cumprod", [NoSideEffect, TF_AllTypesMatch<["x", "out"]>]> {
  let summary = [{
Compute the cumulative product of the tensor `x` along `axis`.
  }];

  let description = [{
By default, this op performs an inclusive cumprod, which means that the first
element of the input is identical to the first element of the output:

```python
tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
```

By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
performed instead:

```python
tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
```

By setting the `reverse` kwarg to `True`, the cumprod is performed in the
opposite direction:

```python
tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
```

This is more efficient than using separate `tf.reverse` ops.

The `reverse` and `exclusive` kwargs can also be combined:

```python
tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.}]>:$x,
    Arg<TF_I32OrI64Tensor, [{A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.}]>:$axis,

    DefaultValuedAttr<BoolAttr, "false">:$exclusive,
    DefaultValuedAttr<BoolAttr, "false">:$reverse
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$out
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_CumsumOp : TF_Op<"Cumsum", [NoSideEffect, TF_AllTypesMatch<["x", "out"]>]> {
  let summary = "Compute the cumulative sum of the tensor `x` along `axis`.";

  let description = [{
By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output:

```python
tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
```

By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
performed instead:

```python
tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
```

By setting the `reverse` kwarg to `True`, the cumsum is performed in the
opposite direction:

```python
tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
```

This is more efficient than using separate `tf.reverse` ops.

The `reverse` and `exclusive` kwargs can also be combined:

```python
tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.}]>:$x,
    Arg<TF_I32OrI64Tensor, [{A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.}]>:$axis,

    DefaultValuedAttr<BoolAttr, "false">:$exclusive,
    DefaultValuedAttr<BoolAttr, "false">:$reverse
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$out
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_DataFormatDimMapOp : TF_Op<"DataFormatDimMap", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Returns the dimension index in the destination data format given the one in
  }];

  let description = [{
the source data format.
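
For example, mapping all NHWC dimension indices to NCHW (an illustrative
doctest via `tf.raw_ops.DataFormatDimMap` with the default formats):

>>> x = tf.constant([0, 1, 2, 3])
>>> tf.raw_ops.DataFormatDimMap(x=x)
<tf.Tensor: shape=(4,), dtype=int32, numpy=array([0, 2, 3, 1], dtype=int32)>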
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{A Tensor with each element as a dimension index in source data format.
Must be in the range [-4, 4).}]>:$x,

    DefaultValuedAttr<StrAttr, "NHWC">:$src_format,
    DefaultValuedAttr<StrAttr, "NCHW">:$dst_format
  );

  let results = (outs
    Res<TF_I32OrI64Tensor, [{A Tensor with each element as a dimension index in destination data format.}]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DataFormatVecPermuteOp : TF_Op<"DataFormatVecPermute", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Permute input tensor from `src_format` to `dst_format`.";

  let description = [{
Input tensor must be a vector of size 4, or a 4x2 tensor.

For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs:
```
[1, 2, 3, 4]
```
and
```
[[1, 2, 3, 4],
 [5, 6, 7, 8]]
```
the outputs will be (respectively):
```
[1, 4, 2, 3]
```
and
```
[[1, 4, 2, 3],
 [5, 8, 6, 7]]
```
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{Vector of size 4 or Tensor of shape (4, 2) in source data format.}]>:$x,

    DefaultValuedAttr<StrAttr, "NHWC">:$src_format,
    DefaultValuedAttr<StrAttr, "NCHW">:$dst_format
  );

  let results = (outs
    Res<TF_I32OrI64Tensor, [{Vector of size 4 or Tensor of shape (4, 2) in destination data format.}]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{ return Verify(*this); }];
}

def TF_DebugIdentityV2Op : TF_Op<"DebugIdentityV2", []> {
  let summary = "Debug Identity V2 Op.";

  let description = [{
Provides an identity mapping from input to output, while writing the content of
the input tensor by calling DebugEventsWriter.

The semantics of the input tensor depend on tensor_debug_mode. In typical
usage, the input tensor comes directly from the user computation only when
graph_debug_mode is FULL_TENSOR (see protobuf/debug_event.proto for a
list of all the possible values of graph_debug_mode). For the other debug modes,
the input tensor should be produced by an additional op or subgraph that
computes summary information about one or more tensors.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{Input tensor, non-Reference type}]>:$input,

    StrAttr:$tfdbg_context_id,
    StrAttr:$op_name,
    DefaultValuedAttr<I64Attr, "-1">:$output_slot,
    DefaultValuedAttr<I64Attr, "-1">:$tensor_debug_mode,
    DefaultValuedAttr<StrArrayAttr, "{}">:$debug_urls,
    DefaultValuedAttr<I64Attr, "1000">:$circular_buffer_size,
    StrAttr:$tfdbg_run_id
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DecodeAndCropJpegOp : TF_Op<"DecodeAndCropJpeg", [NoSideEffect]> {
  let summary = "Decode and Crop a JPEG-encoded image to a uint8 tensor.";

  let description = [{
The attr `channels` indicates the desired number of color channels for the
decoded image.

Accepted values are:

*   0: Use the number of channels in the JPEG-encoded image.
*   1: output a grayscale image.
*   3: output an RGB image.

If needed, the JPEG-encoded image is transformed to match the requested number
of color channels.

The attr `ratio` allows downscaling the image by an integer factor during
decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
downscaling the image later.

It is equivalent to a combination of decode and crop, but much faster because
it decodes only part of the JPEG image.
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{0-D.  The JPEG-encoded image.}]>:$contents,
    Arg<TF_Int32Tensor, [{1-D.  The crop window: [crop_y, crop_x, crop_height, crop_width].}]>:$crop_window,

    DefaultValuedAttr<I64Attr, "0">:$channels,
    DefaultValuedAttr<I64Attr, "1">:$ratio,
    DefaultValuedAttr<BoolAttr, "true">:$fancy_upscaling,
    DefaultValuedAttr<BoolAttr, "false">:$try_recover_truncated,
    DefaultValuedAttr<F32Attr, "1.0f">:$acceptable_fraction,
    StrAttr:$dct_method
  );

  let results = (outs
    Res<TF_Uint8Tensor, [{3-D with shape `[height, width, channels]`.}]>:$image
  );
}

def TF_DecodeGifOp : TF_Op<"DecodeGif", [NoSideEffect]> {
  let summary = "Decode the frame(s) of a GIF-encoded image to a uint8 tensor.";

  let description = [{
GIF images with frame or transparency compression are not supported.
On Linux and macOS systems, convert animated GIFs from compressed to
uncompressed by running:

    convert $src.gif -coalesce $dst.gif

This op also supports decoding JPEGs and PNGs, though it is cleaner to use
`tf.io.decode_image`.
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{0-D.  The GIF-encoded image.}]>:$contents
  );

  let results = (outs
    Res<TF_Uint8Tensor, [{4-D with shape `[num_frames, height, width, 3]`. RGB channel order.}]>:$image
  );
}

def TF_DecodeJpegOp : TF_Op<"DecodeJpeg", [NoSideEffect]> {
  let summary = "Decode a JPEG-encoded image to a uint8 tensor.";

  let description = [{
The attr `channels` indicates the desired number of color channels for the
decoded image.

Accepted values are:

*   0: Use the number of channels in the JPEG-encoded image.
*   1: output a grayscale image.
*   3: output an RGB image.

If needed, the JPEG-encoded image is transformed to match the requested number
of color channels.

The attr `ratio` allows downscaling the image by an integer factor during
decoding.  Allowed values are: 1, 2, 4, and 8.  This is much faster than
downscaling the image later.

This op also supports decoding PNGs and non-animated GIFs since the interface is
the same, though it is cleaner to use `tf.io.decode_image`.
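
For example (a hypothetical sketch via the public `tf.io.decode_jpeg` wrapper;
`"image.jpg"` is a placeholder path):

```python
import tensorflow as tf

# Read raw JPEG bytes from disk and decode them into a uint8 HWC tensor,
# forcing three (RGB) color channels.
contents = tf.io.read_file("image.jpg")
image = tf.io.decode_jpeg(contents, channels=3)
print(image.dtype, image.shape)  # uint8 (height, width, 3)
```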
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{0-D.  The JPEG-encoded image.}]>:$contents,

    DefaultValuedAttr<I64Attr, "0">:$channels,
    DefaultValuedAttr<I64Attr, "1">:$ratio,
    DefaultValuedAttr<BoolAttr, "true">:$fancy_upscaling,
    DefaultValuedAttr<BoolAttr, "false">:$try_recover_truncated,
    DefaultValuedAttr<F32Attr, "1.0f">:$acceptable_fraction,
    StrAttr:$dct_method
  );

  let results = (outs
    Res<TF_Uint8Tensor, [{3-D with shape `[height, width, channels]`.}]>:$image
  );
}

def TF_DecodePngOp : TF_Op<"DecodePng", [NoSideEffect]> {
  let summary = "Decode a PNG-encoded image to a uint8 or uint16 tensor.";

  let description = [{
The attr `channels` indicates the desired number of color channels for the
decoded image.

Accepted values are:

*   0: Use the number of channels in the PNG-encoded image.
*   1: output a grayscale image.
*   3: output an RGB image.
*   4: output an RGBA image.

If needed, the PNG-encoded image is transformed to match the requested number
of color channels.

This op also supports decoding JPEGs and non-animated GIFs since the interface
is the same, though it is cleaner to use `tf.io.decode_image`.
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{0-D.  The PNG-encoded image.}]>:$contents,

    DefaultValuedAttr<I64Attr, "0">:$channels
  );

  let results = (outs
    Res<TensorOf<[TF_Uint16, TF_Uint8]>, [{3-D with shape `[height, width, channels]`.}]>:$image
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_DeleteIteratorOp : TF_Op<"DeleteIterator", []> {
  let summary = "A container for an iterator resource.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{A handle to the iterator to delete.}], [TF_DatasetIteratorFree]>:$handle,
    Arg<TF_VariantTensor, [{A variant deleter.}]>:$deleter
  );

  let results = (outs);
}

def TF_DeleteMemoryCacheOp : TF_Op<"DeleteMemoryCache", []> {
  let summary = "";

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_DatasetMemoryCacheFree]>:$handle,
    TF_VariantTensor:$deleter
  );

  let results = (outs);
}

def TF_DeleteMultiDeviceIteratorOp : TF_Op<"DeleteMultiDeviceIterator", []> {
  let summary = "A container for an iterator resource.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{A handle to the multi device iterator to delete.}], [TF_DatasetIteratorFree]>:$multi_device_iterator,
    Arg<Variadic<TF_ResourceTensor>, [{A list of iterator handles (unused). This is added so that automatic control dependencies get added during function tracing that ensure this op runs after all the dependent iterators are deleted.}], [TF_DatasetIteratorRead]>:$iterators,
    Arg<TF_VariantTensor, [{A variant deleter.}]>:$deleter
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<1>;
}

def TF_DeleteRandomSeedGeneratorOp : TF_Op<"DeleteRandomSeedGenerator", []> {
  let summary = "";

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorFree]>:$handle,
    TF_VariantTensor:$deleter
  );

  let results = (outs);
}

def TF_DeleteSeedGeneratorOp : TF_Op<"DeleteSeedGenerator", []> {
  let summary = "";

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorFree]>:$handle,
    TF_VariantTensor:$deleter
  );

  let results = (outs);
}

def TF_DepthToSpaceOp : TF_Op<"DepthToSpace", [NoSideEffect]> {
  let summary = "DepthToSpace for tensors of type T.";

  let description = [{
Rearranges data from depth into blocks of spatial data.
This is the reverse transformation of SpaceToDepth. More specifically,
this op outputs a copy of the input tensor where values from the `depth`
dimension are moved in spatial blocks to the `height` and `width` dimensions.
The attr `block_size` indicates the input block size and how the data is moved.

  * Chunks of data of size `block_size * block_size` from depth are rearranged
    into non-overlapping blocks of size `block_size x block_size`
  * The width of the output tensor is `input_width * block_size`, whereas the
    height is `input_height * block_size`.
  * The Y, X coordinates within each block of the output image are determined
    by the high order component of the input channel index.
  * The depth of the input tensor must be divisible by
    `block_size * block_size`.

The `data_format` attr specifies the layout of the input and output tensors
with the following options:
  "NHWC": `[ batch, height, width, channels ]`
  "NCHW": `[ batch, channels, height, width ]`
  "NCHW_VECT_C":
      `qint8 [ batch, channels / 4, height, width, 4 ]`

It is useful to consider the operation as transforming a 6-D Tensor.
e.g. for data_format = NHWC,
     Each element in the input tensor can be specified via 6 coordinates,
     ordered by decreasing memory layout significance as:
     n,iY,iX,bY,bX,oC  (where n=batch index, iX, iY means X or Y coordinates
                        within the input image, bX, bY means coordinates
                        within the output block, oC means output channels).
     The output would be the input transposed to the following layout:
     n,iY,bY,iX,bX,oC

This operation is useful for resizing the activations between convolutions
(but keeping all data), e.g. instead of pooling. It is also useful for training
purely convolutional models.

For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and
block_size = 2:

```
x = [[[[1, 2, 3, 4]]]]

```

This operation will output a tensor of shape `[1, 2, 2, 1]`:

```
   [[[[1], [2]],
     [[3], [4]]]]
```

Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`,
the corresponding output will have 2x2 elements and will have a depth of
1 channel (1 = `4 / (block_size * block_size)`).
The output element shape is `[2, 2, 1]`.

For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g.

```
x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
```

This operation, for block size of 2, will return the following tensor of shape
`[1, 2, 2, 3]`

```
   [[[[1, 2, 3], [4, 5, 6]],
     [[7, 8, 9], [10, 11, 12]]]]

```

Similarly, for the following input of shape `[1, 2, 2, 4]`, and a block size of 2:

```
x =  [[[[1, 2, 3, 4],
       [5, 6, 7, 8]],
      [[9, 10, 11, 12],
       [13, 14, 15, 16]]]]
```

the operator will return the following tensor of shape `[1, 4, 4, 1]`:

```
x = [[[ [1],   [2],  [5],  [6]],
      [ [3],   [4],  [7],  [8]],
      [ [9],  [10], [13],  [14]],
      [ [11], [12], [15],  [16]]]]

```
  }];

  let arguments = (ins
    TF_Tensor:$input,

    Confined<I64Attr, [IntMinValue<2>]>:$block_size,
    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

3036def TF_DepthwiseConv2dNativeOp : TF_Op<"DepthwiseConv2dNative", [NoSideEffect]> {
3037  let summary = [{
3038Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors.
3039  }];
3040
3041  let description = [{
3042Given an input tensor of shape `[batch, in_height, in_width, in_channels]`
3043and a filter / kernel tensor of shape
3044`[filter_height, filter_width, in_channels, channel_multiplier]`, containing
3045`in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies
3046a different filter to each input channel (expanding from 1 channel to
3047`channel_multiplier` channels for each), then concatenates the results
3048together. Thus, the output has `in_channels * channel_multiplier` channels.
3049
3050```
3051for k in 0..in_channels-1
3052  for q in 0..channel_multiplier-1
3053    output[b, i, j, k * channel_multiplier + q] =
3054      sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] *
3055                        filter[di, dj, k, q]
3056```
3057
Must have `strides[0] = strides[3] = 1`.  For the most common case of the same
horizontal and vertical strides, `strides = [1, stride, stride, 1]`.
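
As a cross-check, the loop above corresponds to the following NumPy sketch for
`VALID` padding and equal strides (illustrative only, not the registered
kernel):

```python
import numpy as np

def depthwise_conv2d_valid(x, f, stride=1):
    # x: [batch, in_h, in_w, in_ch]; f: [f_h, f_w, in_ch, mult].
    b, ih, iw, ic = x.shape
    fh, fw, _, m = f.shape
    oh = (ih - fh) // stride + 1
    ow = (iw - fw) // stride + 1
    out = np.zeros((b, oh, ow, ic * m))
    for k in range(ic):            # input channel
        for q in range(m):         # channel-multiplier index
            for i in range(oh):
                for j in range(ow):
                    patch = x[:, i*stride:i*stride+fh, j*stride:j*stride+fw, k]
                    out[:, i, j, k*m + q] = np.sum(patch * f[:, :, k, q],
                                                   axis=(1, 2))
    return out
```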
  }];

  let arguments = (ins
    TF_FloatTensor:$input,
    TF_FloatTensor:$filter,

    I64ArrayAttr:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DepthwiseConv2dNativeBackpropFilterOp : TF_Op<"DepthwiseConv2dNativeBackpropFilter", [NoSideEffect]> {
  let summary = [{
Computes the gradients of depthwise convolution with respect to the filter.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.  For example, if
`data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height,
in_width, in_channels]` tensor.}]>:$input,
    Arg<TF_Int32Tensor, [{An integer vector representing the tensor shape of `filter`,
where `filter` is a 4-D
`[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor.}]>:$filter_sizes,
    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.
For example, if `data_format` is 'NHWC' then
out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.}]>:$out_backprop,

    I64ArrayAttr:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    Res<TF_FloatTensor, [{4-D with shape
`[filter_height, filter_width, in_channels, out_channels]`.  Gradient w.r.t.
the `filter` input of the convolution.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DepthwiseConv2dNativeBackpropInputOp : TF_Op<"DepthwiseConv2dNativeBackpropInput", [NoSideEffect]> {
  let summary = [{
Computes the gradients of depthwise convolution with respect to the input.
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{An integer vector representing the shape of `input`, based
on `data_format`.  For example, if `data_format` is 'NHWC' then
`input` is a 4-D `[batch, height, width, channels]` tensor.}]>:$input_sizes,
    Arg<TF_FloatTensor, [{4-D with shape
`[filter_height, filter_width, in_channels, depthwise_multiplier]`.}]>:$filter,
    Arg<TF_FloatTensor, [{4-D with shape based on `data_format`.
For example, if `data_format` is 'NHWC' then
out_backprop shape is `[batch, out_height, out_width, out_channels]`.
Gradients w.r.t. the output of the convolution.}]>:$out_backprop,

    I64ArrayAttr:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations
  );

  let results = (outs
    Res<TF_FloatTensor, [{4-D with shape according to `data_format`.  For example, if
`data_format` is 'NHWC', output shape is `[batch, in_height,
in_width, in_channels]`.  Gradient w.r.t. the input of the
convolution.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_DequantizeOp : TF_Op<"Dequantize", [NoSideEffect]> {
  let summary = [{
Dequantize the 'input' tensor into a float or bfloat16 Tensor.
  }];

  let description = [{
[min_range, max_range] are scalar floats that specify the range for
the output. The 'mode' attribute controls exactly which calculations are
used to convert the quantized values to their float equivalents.

In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:

```
if T == qint8: in[i] += (range(T) + 1) / 2.0
out[i] = min_range + (in[i] * (max_range - min_range) / range(T))
```
here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

*MIN_COMBINED Mode Example*

If the input comes from a QuantizedRelu6, the output type is
quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
Dequantize on quint8 will take each value, cast to float, and multiply
by 6 / 255.
Note that if the quantized type is qint8, the operation will additionally add
128 to each value prior to casting.
If the mode is 'MIN_FIRST', then this approach is used:

```c++
num_discrete_values = 1 << (# of bits in T)
range_adjust = num_discrete_values / (num_discrete_values - 1)
range = (range_max - range_min) * range_adjust
range_scale = range / num_discrete_values
const double offset_input = static_cast<double>(input) - lowest_quantized;
result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
```

If the mode is `SCALED`, dequantization is performed by multiplying each
input value by a scaling_factor. (Thus an input of 0 always maps to 0.0.)

The scaling_factor is determined from `min_range`, `max_range`, and
`narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}`
and `QuantizeV2`, using the following algorithm:

```c++
  const int min_expected_T = std::numeric_limits<T>::min() +
    (narrow_range ? 1 : 0);
  const int max_expected_T = std::numeric_limits<T>::max();

  const float scale_factor =
    (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T)
                                         : std::max(min_range / min_expected_T,
                                                    max_range / max_expected_T);
```
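
For concreteness, a minimal NumPy sketch of the `SCALED` path under these
rules (illustrative only, not the registered kernel):

```python
import numpy as np

def dequantize_scaled(q, min_range, max_range, narrow_range=False):
    # q: quantized values as a signed or unsigned NumPy integer array.
    info = np.iinfo(q.dtype)
    min_expected = info.min + (1 if narrow_range else 0)
    max_expected = info.max
    if info.min == 0:  # unsigned quantized types
        scale = max_range / max_expected
    else:
        scale = max(min_range / min_expected, max_range / max_expected)
    return q.astype(np.float32) * scale

q = np.array([-128, 0, 127], dtype=np.int8)
print(dequantize_scaled(q, -1.0, 1.0))  # approx. [-1.008, 0., 1.]
```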
  }];

  let arguments = (ins
    TensorOf<[TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8]>:$input,
    Arg<TF_Float32Tensor, [{The minimum scalar value possibly produced for the input.}]>:$min_range,
    Arg<TF_Float32Tensor, [{The maximum scalar value possibly produced for the input.}]>:$max_range,

    DefaultValuedAttr<TF_AnyStrAttrOf<["MIN_COMBINED", "MIN_FIRST", "SCALED"]>, "MIN_COMBINED">:$mode,
    DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
    DefaultValuedAttr<I64Attr, "-1">:$axis
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Float32]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_DeserializeIteratorOp : TF_Op<"DeserializeIterator", []> {
  let summary = [{
Converts the given variant tensor to an iterator and stores it in the given resource.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorWrite]>:$resource_handle,
    Arg<TF_VariantTensor, [{A variant tensor storing the state of the iterator contained in the
resource.}]>:$serialized
  );

  let results = (outs);
}

def TF_DeserializeSparseOp : TF_Op<"DeserializeSparse", [NoSideEffect]> {
  let summary = "Deserialize `SparseTensor` objects.";

  let description = [{
The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
the last dimension stores serialized `SparseTensor` objects and the other N
dimensions (N >= 0) correspond to a batch. The ranks of the original
`SparseTensor` objects must all match. When the final `SparseTensor` is
created, its rank is the rank of the incoming `SparseTensor` objects plus N;
the sparse tensors have been concatenated along new dimensions, one for each
batch.

The output `SparseTensor` object's shape values for the original dimensions
are the max across the input `SparseTensor` objects' shape values for the
corresponding dimensions. The new dimensions match the size of the batch.

The input `SparseTensor` objects' indices are assumed ordered in
standard lexicographic order.  If this is not the case, after this
step run `SparseReorder` to restore index ordering.

For example, if the serialized input is a `[2 x 3]` matrix representing two
original `SparseTensor` objects:

    index = [ 0]
            [10]
            [20]
    values = [1, 2, 3]
    shape = [50]

and

    index = [ 2]
            [10]
    values = [4, 5]
    shape = [30]

then the final deserialized `SparseTensor` will be:

    index = [0  0]
            [0 10]
            [0 20]
            [1  2]
            [1 10]
    values = [1, 2, 3, 4, 5]
    shape = [2 50]
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Str, TF_Variant]>, [{The serialized `SparseTensor` objects. The last dimension
must have 3 columns.}]>:$serialized_sparse
  );

  let results = (outs
    TF_Int64Tensor:$sparse_indices,
    TF_Tensor:$sparse_values,
    TF_Int64Tensor:$sparse_shape
  );

  TF_DerivedOperandTypeAttr Tserialized = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<1>;
}

def TF_DestroyResourceOp : TF_Op<"DestroyResourceOp", []> {
  let summary = "Deletes the resource specified by the handle.";

  let description = [{
All subsequent operations using the resource will result in a NotFound
error status.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{handle to the resource to delete.}]>:$resource,

    DefaultValuedAttr<BoolAttr, "true">:$ignore_lookup_error
  );

  let results = (outs);
}

def TF_DeviceIndexOp : TF_Op<"DeviceIndex", [NoSideEffect]> {
  let summary = "Return the index of the device the op runs on.";

  let description = [{
Given a list of device names, this operation returns the index of the device
this op runs on. The length of the list is returned in two cases:
(1) the device does not exist in the given device list;
(2) the op runs under XLA compilation.
  }];

  let arguments = (ins
    StrArrayAttr:$device_names
  );

  let results = (outs
    TF_Int32Tensor:$index
  );
}

def TF_DiagOp : TF_Op<"Diag", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Returns a diagonal tensor with given diagonal values.";

  let description = [{
Given a `diagonal`, this operation returns a tensor with the `diagonal` and
everything else padded with zeros. The diagonal is computed as follows:

Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:

`output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.

For example:

```
# 'diagonal' is [1, 2, 3, 4]
tf.diag(diagonal) ==> [[1, 0, 0, 0]
                       [0, 2, 0, 0]
                       [0, 0, 3, 0]
                       [0, 0, 0, 4]]
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Rank k tensor where k is at most 1.}]>:$diagonal
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DiagPartOp : TF_Op<"DiagPart", [NoSideEffect]> {
  let summary = "Returns the diagonal part of the tensor.";

  let description = [{
This operation returns a tensor with the `diagonal` part
of the `input`. The `diagonal` part is computed as follows:

Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
tensor of rank `k` with dimensions `[D1,..., Dk]` where:

`diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.

For example:

```
# 'input' is [[1, 0, 0, 0]
              [0, 2, 0, 0]
              [0, 0, 3, 0]
              [0, 0, 0, 4]]

tf.diag_part(input) ==> [1, 2, 3, 4]
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Rank k tensor where k is even and not zero.}]>:$input
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The extracted diagonal.}]>:$diagonal
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DigammaOp : TF_Op<"Digamma", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Computes Psi, the derivative of Lgamma (the log of the absolute value of
  }];

  let description = [{
`Gamma(x)`), element-wise.
  }];

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_DivOp : TF_Op<"Div", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
               WithBroadcastableBinOpBuilder {
  let summary = "Returns x / y element-wise.";

  let description = [{
*NOTE*: `Div` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;

  let hasFolder = 1;
}

def TF_DummyMemoryCacheOp : TF_Op<"DummyMemoryCache", []> {
  let summary = "";

  let arguments = (ins);

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetMemoryCacheAlloc]>:$handle
  );
}

def TF_DummySeedGeneratorOp : TF_Op<"DummySeedGenerator", []> {
  let summary = "";

  let arguments = (ins);

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorAlloc]>:$handle
  );
}

def TF_DynamicStitchOp : TF_Op<"DynamicStitch", [NoSideEffect, SameVariadicOperandSize]> {
  let summary = [{
Interleave the values from the `data` tensors into a single tensor.
  }];

  let description = [{
Builds a merged tensor such that

```python
    merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
```

For example, if each `indices[m]` is scalar or vector, we have

```python
    # Scalar indices:
    merged[indices[m], ...] = data[m][...]

    # Vector indices:
    merged[indices[m][i], ...] = data[m][i, ...]
```

Each `data[i].shape` must start with the corresponding `indices[i].shape`,
and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
must have `data[i].shape = indices[i].shape + constant`.  In terms of this
`constant`, the output shape is
    merged.shape = [max(indices) + 1] + constant

Values are merged in order, so if an index appears in both `indices[m][i]` and
`indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the
merged result. If you do not need this guarantee, ParallelDynamicStitch might
perform better on some devices.

For example:

```python
    indices[0] = 6
    indices[1] = [4, 1]
    indices[2] = [[5, 2], [0, 3]]
    data[0] = [61, 62]
    data[1] = [[41, 42], [11, 12]]
    data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
    merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
              [51, 52], [61, 62]]
```

This method can be used to merge partitions created by `dynamic_partition`
as illustrated in the following example:

```python
    # Apply a function (increment x_i) to elements for which a certain
    # condition applies (x_i != -1 in this example).
    x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
    condition_mask = tf.not_equal(x, tf.constant(-1.))
    partitioned_data = tf.dynamic_partition(
        x, tf.cast(condition_mask, tf.int32), 2)
    partitioned_data[1] = partitioned_data[1] + 1.0
    condition_indices = tf.dynamic_partition(
        tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
    x = tf.dynamic_stitch(condition_indices, partitioned_data)
    # Here x = [1.1, -1., 6.2, 5.3, -1., 8.4]; the -1. values remain
    # unchanged.
```

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
</div>
  }];

  let arguments = (ins
    Variadic<TF_Int32Tensor>:$indices,
    Variadic<TF_Tensor>:$data
  );

  let results = (outs
    TF_Tensor:$merged
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_EinsumOp : TF_Op<"Einsum", [NoSideEffect]> {
  let summary = [{
Tensor contraction according to Einstein summation convention.
  }];

  let description = [{
Implements generalized Tensor contraction and reduction. Each input Tensor must
have a corresponding input subscript appearing in the comma-separated left-hand
side of the equation. The right-hand side of the equation consists of the
output subscript. The input subscripts and the output subscript should consist
of zero or more named axis labels and at most one ellipsis (`...`).

The named axis labels may be any single character other than those having
special meaning, namely `,.->`. The behavior of this Op is undefined if it
receives an ill-formatted equation; since the validation is done at
graph-building time, we omit format validation checks at runtime.

Note: This Op is *not* intended to be called by the user; instead users should
call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`.

Operations are applied to the input(s) according to the following rules:

 (a) Generalized Diagonals: For input dimensions corresponding to axis labels
     appearing more than once in the same input subscript, we take the
     generalized (`k`-dimensional) diagonal.
     For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the
     generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`,
     `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`.

 (b) Reduction: Axes corresponding to labels appearing only in one input
     subscript but not in the output subscript are summed over prior to Tensor
     contraction.
     For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are
     the reduction axis labels.

 (c) Batch Dimensions: Axes corresponding to labels appearing in each of the
     input subscripts and also in the output subscript make up the batch
     dimensions in Tensor contraction. Unnamed axis labels corresponding to
     ellipsis (`...`) also correspond to batch dimensions.
     For example, for the equation denoting batch matrix multiplication,
     `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension.

 (d) Contraction: In case of binary einsum, axes corresponding to labels
     appearing in two different inputs (and not in the output) are contracted
     against each other.
     Considering the batch matrix multiplication equation again
     (`bij,bjk->bik`), the contracted axis label is `j`.

 (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis
     labels, the opposite operation of (a) is applied. For example, in the
     equation `i->iii` and input shape `[3]`, the output of shape `[3, 3, 3]`
     is all zeros, except for the (generalized) diagonal which is populated
     with values from the input.
     Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is
     provided to enable computing the symbolic gradient of `tf.einsum`.

The output subscripts must contain only labels appearing in at least one of the
input subscripts. Furthermore, all dimensions mapping to the same axis label
must be equal.
Any of the input and output subscripts may contain at most a single ellipsis
(`...`). These ellipses are mapped against dimensions not corresponding to any
named axis label. If two inputs contain an ellipsis, then they are broadcasted
according to standard NumPy broadcasting
[rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).

The broadcasted dimensions are placed in the corresponding location of the
ellipsis in the output subscript. If the broadcasted dimensions are non-empty
and the output subscripts do not contain ellipsis, then an InvalidArgument error
is raised.

@compatibility(numpy)
Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html).

Comparison with `numpy.einsum`:

 * This Op only supports unary and binary forms of `numpy.einsum`.
 * This Op does not support the implicit form (i.e. equations without `->`).
 * This Op also supports repeated indices in the output subscript, which is not
   supported by `numpy.einsum`.
@end_compatibility
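
A few concrete equations, shown with `np.einsum` since the subscript semantics
match (users would normally reach this Op through `tf.einsum`):

```python
import numpy as np

a = np.arange(6).reshape(2, 3)      # shape [2, 3]
b = np.arange(12).reshape(3, 4)     # shape [3, 4]

np.einsum('ij,jk->ik', a, b)        # contraction: matrix product, shape [2, 4]
np.einsum('ij->i', a)               # reduction: row sums, shape [2]
np.einsum('ii->i', np.eye(3))       # generalized diagonal, shape [3]

c = np.arange(24).reshape(2, 3, 4)
d = np.arange(40).reshape(2, 4, 5)
np.einsum('bij,bjk->bik', c, d)     # batch matmul, shape [2, 3, 5]
```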
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{List of 1 or 2 Tensors.}]>:$inputs,

    StrAttr:$equation
  );

  let results = (outs
    Res<TF_Tensor, [{Output Tensor with shape depending upon `equation`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_EluOp : TF_Op<"Elu", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise.
  }];

  let description = [{
See [Fast and Accurate Deep Network Learning by Exponential Linear Units
(ELUs)](http://arxiv.org/abs/1511.07289).
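
Numerically, that is (a minimal NumPy sketch, not the registered kernel):

```python
import numpy as np

def elu(features):
    # exp(features) - 1 for negative inputs, identity otherwise.
    return np.where(features < 0, np.exp(features) - 1.0, features)

print(elu(np.array([-1.0, 0.0, 2.0])))  # approx. [-0.632, 0., 2.]
```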
  }];

  let arguments = (ins
    TF_FloatTensor:$features
  );

  let results = (outs
    TF_FloatTensor:$activations
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_EluGradOp : TF_Op<"EluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Computes gradients for the exponential linear (Elu) operation.
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding Elu operation.}]>:$gradients,
    Arg<TF_FloatTensor, [{The outputs of the corresponding Elu operation.}]>:$outputs
  );

  let results = (outs
    Res<TF_FloatTensor, [{The gradients: `gradients * (outputs + 1)` if outputs < 0,
`gradients` otherwise.}]>:$backprops
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_EmptyOp : TF_Op<"Empty", []> {
  let summary = [{
Creates a tensor with the given shape.

This operation creates a tensor of `shape` and `dtype`.
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{1-D. Represents the shape of the output tensor.}]>:$shape,

    DefaultValuedAttr<BoolAttr, "false">:$init
  );

  let results = (outs
    Res<TF_Tensor, [{A `Tensor` of type `T`.}]>:$output
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;

  let hasFolder = 1;
}

def TF_EnqueueTPUEmbeddingIntegerBatchOp : TF_Op<"EnqueueTPUEmbeddingIntegerBatch", [TF_TPUEmbeddingSideEffect]> {
  let summary = [{
An op that enqueues a list of input batch tensors to TPUEmbedding.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Int32Tensor>, [{A list of 1D tensors, one for each embedding table, containing the
indices into the tables.}]>:$batch,
    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,

    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_EnqueueTPUEmbeddingRaggedTensorBatchOp : TF_Op<"EnqueueTPUEmbeddingRaggedTensorBatch", [SameVariadicOperandSize, TF_TPUEmbeddingSideEffect]> {
  let summary = "Eases the porting of code that uses tf.nn.embedding_lookup().";

  let description = [{
sample_splits[i], embedding_indices[i] and aggregation_weights[i] correspond
to the ith feature. table_ids[i] indicates which embedding table to look up for
the ith feature.

The tensors at corresponding positions in two of the input lists,
embedding_indices and aggregation_weights, must have the same shape, i.e. rank 1
with dim_size() equal to the total number of lookups into the table described by
the corresponding feature.
  }];

  let arguments = (ins
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the break points for splitting
embedding_indices and aggregation_weights into rows.
It corresponds to ids.row_splits in embedding_lookup(), when ids is a
RaggedTensor.}]>:$sample_splits,
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.
It corresponds to ids.values in embedding_lookup(), when ids is a RaggedTensor.}]>:$embedding_indices,
    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per training example
aggregation weights. It corresponds to the values field of a RaggedTensor
with the same row_splits as ids in embedding_lookup(), when ids is a
RaggedTensor.}]>:$aggregation_weights,
    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,

    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners,
    I64ArrayAttr:$table_ids,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$max_sequence_lengths,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$num_features
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr T1 = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr T2 = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T3 = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_EnqueueTPUEmbeddingSparseBatchOp : TF_Op<"EnqueueTPUEmbeddingSparseBatch", [SameVariadicOperandSize, TF_TPUEmbeddingSideEffect]> {
  let summary = [{
An op that enqueues TPUEmbedding input indices from a SparseTensor.
  }];

  let description = [{
This Op eases the porting of code that uses embedding_lookup_sparse(),
although some Python preprocessing of the SparseTensor arguments to
embedding_lookup_sparse() is required to produce the arguments to this Op,
since only a single EnqueueTPUEmbeddingSparseBatch Op is allowed per training
step.

The tensors at corresponding positions in the three input lists
must have the same shape, i.e. rank 1 with dim_size() equal to the total
number of lookups into the table described by the corresponding table_id.
  }];

  let arguments = (ins
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the training example and
feature to which the corresponding embedding_indices and aggregation_weights
values belong. sample_indices[i] must equal b * nf + f, where nf is the
number of features from the corresponding table, f is in [0, nf), and
b is in [0, batch size).}]>:$sample_indices,
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.}]>:$embedding_indices,
    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per sample -- i.e. per
(training example, feature) -- aggregation weights.}]>:$aggregation_weights,
    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,

    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr T1 = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr T2 = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T3 = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_EnqueueTPUEmbeddingSparseTensorBatchOp : TF_Op<"EnqueueTPUEmbeddingSparseTensorBatch", [SameVariadicOperandSize, TF_TPUEmbeddingSideEffect]> {
  let summary = [{
Eases the porting of code that uses tf.nn.embedding_lookup_sparse().
  }];

  let description = [{
sample_indices[i], embedding_indices[i] and aggregation_weights[i] correspond
to the ith feature. table_ids[i] indicates which embedding table to look up for
the ith feature.

The tensors at corresponding positions in the three input lists (sample_indices,
embedding_indices and aggregation_weights) must have the same shape, i.e. rank 1
with dim_size() equal to the total number of lookups into the table described by
the corresponding feature.
  }];

  let arguments = (ins
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors specifying the training example to
which the corresponding embedding_indices and aggregation_weights values
belong. It corresponds to sp_ids.indices[:,0] in embedding_lookup_sparse().}]>:$sample_indices,
    Arg<Variadic<TF_I32OrI64Tensor>, [{A list of rank 1 Tensors, indices into the embedding tables.
It corresponds to sp_ids.values in embedding_lookup_sparse().}]>:$embedding_indices,
    Arg<Variadic<TF_F32OrF64Tensor>, [{A list of rank 1 Tensors containing per training example
aggregation weights. It corresponds to sp_weights.values in
embedding_lookup_sparse().}]>:$aggregation_weights,
    Arg<TF_StrTensor, [{A string input that overrides the mode specified in the
TPUEmbeddingConfiguration. Supported values are {'unspecified', 'inference',
'training', 'backward_pass_only'}. When set to 'unspecified', the mode set
in TPUEmbeddingConfiguration is used, otherwise mode_override is used.}]>:$mode_override,

    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners,
    I64ArrayAttr:$table_ids,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$max_sequence_lengths,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$num_features
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr T1 = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr T2 = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T3 = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_EnsureShapeOp : TF_Op<"EnsureShape", [NoSideEffect]> {
  let summary = "Ensures that the tensor's shape matches the expected shape.";

  let description = [{
Raises an error if the input tensor's shape does not match the specified shape.
Returns the input tensor otherwise.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A tensor, whose shape is to be validated.}]>:$input,

    TF_ShapeAttr:$shape
  );

  let results = (outs
    Res<TF_Tensor, [{A tensor with the same shape and contents as the input tensor or value.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_EqualOp : TF_Op<"Equal", [Commutative, NoSideEffect]> {
  let summary = "Returns the truth value of (x == y) element-wise.";

  let description = [{
*NOTE*: `Equal` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

```python
x = tf.constant([2, 4])
y = tf.constant(2)
tf.math.equal(x, y) ==> array([True, False])

x = tf.constant([2, 4])
y = tf.constant([2, 4])
tf.math.equal(x, y) ==> array([True,  True])
```
  }];

  let arguments = (ins
    TF_Tensor:$x,
    TF_Tensor:$y,

    DefaultValuedAttr<BoolAttr, "true">:$incompatible_shape_error
  );

  let results = (outs
    TF_BoolTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let builders = [
    OpBuilderDAG<(ins "Value":$x, "Value":$y,
      "BoolAttr":$incompatible_shape_error)>
  ];

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_ErfOp : TF_Op<"Erf", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the Gauss error function of `x` element-wise.";

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ErfcOp : TF_Op<"Erfc", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Computes the complementary error function of `x` element-wise.
  }];

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ErfinvOp : TF_Op<"Erfinv", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ExpOp : TF_Op<"Exp", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = [{
Computes exponential of x element-wise.  \\(y = e^x\\).
  }];

  let description = [{
This function computes the exponential of every element in the input tensor.
  i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor.
  `e` denotes Euler's number and is approximately equal to 2.718281.
  Output is positive for any real input.

  ```python
  x = tf.constant(2.0)
  tf.math.exp(x) ==> 7.389056

  x = tf.constant([2.0, 8.0])
  tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32)
  ```

  For complex numbers, the exponential value is calculated as follows:

  ```
  e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y)
  ```

  Let's consider the complex number 1+1j as an example:
  e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j)

  ```python
  x = tf.constant(1 + 1j)
  tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ExpandDimsOp : TF_Op<"ExpandDims", [NoSideEffect]> {
  let summary = "Inserts a dimension of 1 into a tensor's shape.";

  let description = [{
Given a tensor `input`, this operation inserts a dimension of 1 at the
dimension index `axis` of `input`'s shape. The dimension index `axis` starts at
zero; if you specify a negative number for `axis` it is counted backward from
the end.

This operation is useful if you want to add a batch dimension to a single
element. For example, if you have a single image of shape `[height, width,
channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`,
which will make the shape `[1, height, width, channels]`.

Other examples:

```
# 't' is a tensor of shape [2]
shape(expand_dims(t, 0)) ==> [1, 2]
shape(expand_dims(t, 1)) ==> [2, 1]
shape(expand_dims(t, -1)) ==> [2, 1]

# 't2' is a tensor of shape [2, 3, 5]
shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5]
shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5]
shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1]
```

This operation requires that:

`-1-input.dims() <= dim <= input.dims()`

This operation is related to `squeeze()`, which removes dimensions of
size 1.
  }];

  let arguments = (ins
    TF_Tensor:$input,
    Arg<TF_I32OrI64Tensor, [{0-D (scalar). Specifies the dimension index at which to
expand the shape of `input`. Must be in the range
`[-rank(input) - 1, rank(input)]`.}]>:$dim
  );

  let results = (outs
    Res<TF_Tensor, [{Contains the same data as `input`, but its shape has an additional
dimension of size 1 added.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tdim = TF_DerivedOperandTypeAttr<1>;

  let builders = [
    OpBuilderDAG<(ins "Value":$condition, "Value":$dim)>
  ];
}

def TF_Expm1Op : TF_Op<"Expm1", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes `exp(x) - 1` element-wise.";

  let description = [{
i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor.
  `e` denotes Euler's number and is approximately equal to 2.718281.

  ```python
  x = tf.constant(2.0)
  tf.math.expm1(x) ==> 6.389056

  x = tf.constant([2.0, 8.0])
  tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32)

  x = tf.constant(1 + 1j)
  tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j)
  ```
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ExtractImagePatchesOp : TF_Op<"ExtractImagePatches", [NoSideEffect]> {
  let summary = [{
Extract `patches` from `images` and put them in the "depth" output dimension.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{4-D Tensor with shape `[batch, in_rows, in_cols, depth]`.}]>:$images,

    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksizes,
    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$rates,
    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{4-D Tensor with shape `[batch, out_rows, out_cols, ksize_rows *
ksize_cols * depth]` containing image patches with size
`ksize_rows x ksize_cols x depth` vectorized in the "depth" dimension. Note
`out_rows` and `out_cols` are the dimensions of the output patches.}]>:$patches
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_FFTOp : TF_Op<"FFT", [NoSideEffect]> {
  let summary = "Fast Fourier transform.";

  let description = [{
Computes the 1-dimensional discrete Fourier transform over the inner-most
dimension of `input`.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
  );

  let results = (outs
    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most
  dimension of `input` is replaced with its 1D Fourier transform.

@compatibility(numpy)
Equivalent to np.fft.fft
@end_compatibility}]>:$output
  );

  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
}

def TF_FFT2DOp : TF_Op<"FFT2D", [NoSideEffect]> {
  let summary = "2D fast Fourier transform.";

  let description = [{
Computes the 2-dimensional discrete Fourier transform over the inner-most
2 dimensions of `input`.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
  );

  let results = (outs
    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 2
  dimensions of `input` are replaced with their 2D Fourier transform.

@compatibility(numpy)
Equivalent to np.fft.fft2
@end_compatibility}]>:$output
  );

  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
}

def TF_FFT3DOp : TF_Op<"FFT3D", [NoSideEffect]> {
  let summary = "3D fast Fourier transform.";

  let description = [{
Computes the 3-dimensional discrete Fourier transform over the inner-most 3
dimensions of `input`.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
  );

  let results = (outs
    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 3
  dimensions of `input` are replaced with their 3D Fourier transform.

@compatibility(numpy)
Equivalent to np.fft.fftn with 3 dimensions.
@end_compatibility}]>:$output
  );

  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
}

def TF_FakeParamOp : TF_Op<"FakeParam", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
  This op is used as a placeholder in If branch functions. It doesn't provide a
  valid output when run, so must either be removed (e.g. replaced with a
  function input) or guaranteed not to be used (e.g. if mirroring an
  intermediate output needed for the gradient computation of the other branch).
  }];

  let arguments = (ins
    TF_ShapeAttr:$shape
  );

  let results = (outs
    Res<TF_Tensor, [{    \"Fake\" output value. This should not be consumed by another op.}]>:$output
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_FakeQuantWithMinMaxArgsOp : TF_Op<"FakeQuantWithMinMaxArgs", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Fake-quantize the 'inputs' tensor of type float to an 'outputs' tensor of the same type.
  }];

  let description = [{
Attributes

*   `[min; max]` define the clamping range for the `inputs` data.
*   `inputs` values are quantized into the quantization range (
`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
when it is true) and then de-quantized and output as floats in the `[min; max]`
interval.
*   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.

Before quantization, `min` and `max` values are adjusted with the following
logic.
It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
the behavior can be unexpected:

*   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
*   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
*   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`,
`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.

Quantization is called fake since the output is still in floating point.
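
The adjustment rules can be checked with a small sketch (a hypothetical helper,
not part of the kernel; rounding-tie behavior may differ slightly from the
kernel's, since Python's built-in `round` rounds half to even):

```python
def adjust_range(min_val, max_val, num_bits=8):
    # Nudge [min, max] so that 0.0 lands exactly on a quantization step.
    if 0 < min_val < max_val:
        return 0.0, max_val - min_val
    if min_val < max_val < 0:
        return min_val - max_val, 0.0
    scale = (max_val - min_val) / (2 ** num_bits - 1)
    min_adj = scale * round(min_val / scale)
    return min_adj, max_val + min_adj - min_val

print(adjust_range(-0.1, 0.9))  # approx. (-0.102, 0.898)
```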
  }];

  let arguments = (ins
    TF_Float32Tensor:$inputs,

    DefaultValuedAttr<F32Attr, "-6.0f">:$min,
    DefaultValuedAttr<F32Attr, "6.0f">:$max,
    DefaultValuedAttr<I64Attr, "8">:$num_bits,
    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
  );

  let results = (outs
    TF_Float32Tensor:$outputs
  );

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_FakeQuantWithMinMaxArgsGradientOp : TF_Op<"FakeQuantWithMinMaxArgsGradient", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Compute gradients for a FakeQuantWithMinMaxArgs operation.";

  let arguments = (ins
    Arg<TF_Float32Tensor, [{Backpropagated gradients above the FakeQuantWithMinMaxArgs operation.}]>:$gradients,
    Arg<TF_Float32Tensor, [{Values passed as inputs to the FakeQuantWithMinMaxArgs operation.}]>:$inputs,

    DefaultValuedAttr<F32Attr, "-6.0f">:$min,
    DefaultValuedAttr<F32Attr, "6.0f">:$max,
    DefaultValuedAttr<I64Attr, "8">:$num_bits,
    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Backpropagated gradients below the FakeQuantWithMinMaxArgs operation:
`gradients * (inputs >= min && inputs <= max)`.}]>:$backprops
  );
}

def TF_FakeQuantWithMinMaxVarsOp : TF_Op<"FakeQuantWithMinMaxVars", [NoSideEffect]> {
  let summary = [{
Fake-quantize the 'inputs' tensor of type float via global float scalars
  }];

  let description = [{
Fake-quantize the `inputs` tensor of type float via global float scalars
`min` and `max` to an `outputs` tensor of the same shape as `inputs`.

Attributes

*   `[min; max]` define the clamping range for the `inputs` data.
*   `inputs` values are quantized into the quantization range (
`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
when it is true) and then de-quantized and output as floats in the `[min; max]`
interval.
*   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.

Before quantization, `min` and `max` values are adjusted with the following
logic.
It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
the behavior can be unexpected:

*   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
*   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
*   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`,
`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.

This operation has a gradient and thus allows for training `min` and `max`
values.
  }];

  let arguments = (ins
    TF_Float32Tensor:$inputs,
    TF_Float32Tensor:$min,
    TF_Float32Tensor:$max,

    DefaultValuedAttr<I64Attr, "8">:$num_bits,
    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
  );

  let results = (outs
    TF_Float32Tensor:$outputs
  );

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_FakeQuantWithMinMaxVarsGradientOp : TF_Op<"FakeQuantWithMinMaxVarsGradient", [NoSideEffect]> {
  let summary = "Compute gradients for a FakeQuantWithMinMaxVars operation.";

  let arguments = (ins
    Arg<TF_Float32Tensor, [{Backpropagated gradients above the FakeQuantWithMinMaxVars operation.}]>:$gradients,
    Arg<TF_Float32Tensor, [{Values passed as inputs to the FakeQuantWithMinMaxVars operation.
min, max: Quantization interval, scalar floats.}]>:$inputs,
    TF_Float32Tensor:$min,
    TF_Float32Tensor:$max,

    DefaultValuedAttr<I64Attr, "8">:$num_bits,
    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
  );

  let results = (outs
    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. inputs:
`gradients * (inputs >= min && inputs <= max)`.}]>:$backprops_wrt_input,
    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. min parameter:
`sum(gradients * (inputs < min))`.}]>:$backprop_wrt_min,
    Res<TF_Float32Tensor, [{Backpropagated gradients w.r.t. max parameter:
`sum(gradients * (inputs > max))`.}]>:$backprop_wrt_max
  );
}

def TF_FakeQuantWithMinMaxVarsPerChannelOp : TF_Op<"FakeQuantWithMinMaxVarsPerChannel", [NoSideEffect]> {
  let summary = [{
Fake-quantize the 'inputs' tensor of type float via per-channel floats
  }];

  let description = [{
Fake-quantize the `inputs` tensor of type float per-channel and one of the
shapes: `[d]`, `[b, d]`, `[b, h, w, d]` via per-channel floats `min` and `max`
of shape `[d]` to an `outputs` tensor of the same shape as `inputs`.

Attributes

*   `[min; max]` define the clamping range for the `inputs` data.
*   `inputs` values are quantized into the quantization range (
`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]`
when it is true) and then de-quantized and output as floats in the `[min; max]`
interval.
*   `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.

Before quantization, `min` and `max` values are adjusted with the following
logic.
It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values,
the behavior can be unexpected:

*   If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
*   If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
*   If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1)`,
`min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.

This operation has a gradient and thus allows for training `min` and `max`
values.
  }];

  let arguments = (ins
    TF_Float32Tensor:$inputs,
    TF_Float32Tensor:$min,
    TF_Float32Tensor:$max,

    DefaultValuedAttr<I64Attr, "8">:$num_bits,
    DefaultValuedAttr<BoolAttr, "false">:$narrow_range
  );

  let results = (outs
    TF_Float32Tensor:$outputs
  );

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_FillOp : TF_Op<"Fill", [NoSideEffect]> {
  let summary = "Creates a tensor filled with a scalar value.";

  let description = [{
This operation creates a tensor of shape `dims` and fills it with `value`.

For example:

```
# Output tensor has shape [2, 3].
fill([2, 3], 9) ==> [[9, 9, 9]
                     [9, 9, 9]]
```

`tf.fill` differs from `tf.constant` in a few ways:

*   `tf.fill` only supports scalar contents, whereas `tf.constant` supports
    Tensor values.
*   `tf.fill` creates an Op in the computation graph that constructs the actual
    Tensor value at runtime. This is in contrast to `tf.constant` which embeds
    the entire Tensor into the graph with a `Const` node.
*   Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes
    based on other runtime Tensors, unlike `tf.constant`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{1-D. Represents the shape of the output tensor.}]>:$dims,
    Arg<TF_Tensor, [{0-D (scalar). Value to fill the returned tensor.

@compatibility(numpy)
Equivalent to np.full
@end_compatibility}]>:$value
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr index_type = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasFolder = 1;

  let builders = [
    OpBuilderDAG<(ins "Value":$dims, "Value":$value)>
  ];
}

def TF_FloorOp : TF_Op<"Floor", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Returns element-wise largest integer not greater than x.";

  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_FloorDivOp : TF_Op<"FloorDiv", [NoSideEffect, ResultsBroadcastableShape]>,
                    WithBroadcastableBinOpBuilder {
  let summary = "Returns x // y element-wise.";

  let description = [{
*NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

4502def TF_FloorModOp : TF_Op<"FloorMod", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
4503                    WithBroadcastableBinOpBuilder {
4504  let summary = [{
4505Returns element-wise remainder of division. When `x < 0` xor `y < 0` is
4506  }];
4507
4508  let description = [{
4509true, this follows Python semantics in that the result here is consistent
4510with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.
4511
4512*NOTE*: `FloorMod` supports broadcasting. More about broadcasting
4513[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
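
For example, the flooring identity above can be checked with the
`tf.math.floormod` wrapper (a minimal sketch):

```python
import tensorflow as tf

x = tf.constant([7, -7])
y = tf.constant([5, 5])
mod = tf.math.floormod(x, y)              # [2, 3]; result takes y's sign
# floor(x / y) * y + mod(x, y) == x
print(tf.math.floordiv(x, y) * y + mod)   # [7, -7]
```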
4514  }];
4515
4516  let arguments = (ins
4517    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64, TF_Uint64]>:$x,
4518    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64, TF_Uint64]>:$y
4519  );
4520
4521  let results = (outs
4522    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64, TF_Uint64]>:$z
4523  );
4524
4525  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4526}
4527
4528def TF_FusedBatchNormOp : TF_Op<"FusedBatchNorm", [NoSideEffect]> {
4529  let summary = "Batch normalization.";
4530
4531  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4533The size of 1D Tensors matches the dimension C of the 4D Tensors.
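
A minimal training-mode sketch, assuming the `tf.compat.v1.nn.fused_batch_norm`
Python wrapper lowers to this op:

```python
import tensorflow as tf

x = tf.random.normal([2, 4, 4, 3])             # NHWC, C = 3
scale, offset = tf.ones([3]), tf.zeros([3])
y, batch_mean, batch_var = tf.compat.v1.nn.fused_batch_norm(
    x, scale, offset, epsilon=0.0001, is_training=True)
```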
4534  }];
4535
4536  let arguments = (ins
4537    Arg<TF_Float32Tensor, [{A 4D Tensor for input data.}]>:$x,
4538    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4539    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
4540    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
4541must be empty for training.}]>:$mean,
4542    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
4543must be empty for training.}]>:$variance,
4544
4545    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4546    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
4547    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
4548    DefaultValuedAttr<BoolAttr, "true">:$is_training
4549  );
4550
4551  let results = (outs
4552    Res<TF_Float32Tensor, [{A 4D Tensor for output data.}]>:$y,
4553    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
4554to compute the running mean.}]>:$batch_mean,
4555    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
4556TensorFlow to compute the running variance.}]>:$batch_variance,
4557    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
4558in the gradient computation.}]>:$reserve_space_1,
4559    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
4560in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2
4561  );
4562
4563  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4564
4565  let hasCanonicalizer = 1;
4566
4567  let verifier = [{
4568    return Verify(*this);
4569  }];
4570}
4571
4572def TF_FusedBatchNormGradOp : TF_Op<"FusedBatchNormGrad", [NoSideEffect]> {
4573  let summary = "Gradient for batch normalization.";
4574
4575  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4577The size of 1D Tensors matches the dimension C of the 4D Tensors.
4578  }];
4579
4580  let arguments = (ins
4581    Arg<TF_Float32Tensor, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
4582    Arg<TF_Float32Tensor, [{A 4D Tensor for input data.}]>:$x,
4583    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4584    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4585mean to be reused in gradient computation. When is_training is
4586False, a 1D Tensor for the population mean to be reused in both
45871st and 2nd order gradient computation.}]>:$reserve_space_1,
4588    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4589variance (inverted variance in the cuDNN case) to be reused in
4590gradient computation. When is_training is False, a 1D Tensor
4591for the population variance to be reused in both 1st and 2nd
4592order gradient computation.}]>:$reserve_space_2,
4593
4594    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4595    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
4596    DefaultValuedAttr<BoolAttr, "true">:$is_training
4597  );
4598
4599  let results = (outs
4600    Res<TF_Float32Tensor, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
4601    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
4602    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
4603    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_3,
4604    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
4605in FusedBatchNorm.}]>:$reserve_space_4
4606  );
4607
4608  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4609}
4610
4611def TF_FusedBatchNormGradV2Op : TF_Op<"FusedBatchNormGradV2", [NoSideEffect]> {
4612  let summary = "Gradient for batch normalization.";
4613
4614  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4616The size of 1D Tensors matches the dimension C of the 4D Tensors.
4617  }];
4618
4619  let arguments = (ins
4620    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
4621    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
4622    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4623    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4624mean to be reused in gradient computation. When is_training is
4625False, a 1D Tensor for the population mean to be reused in both
46261st and 2nd order gradient computation.}]>:$reserve_space_1,
4627    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4628variance (inverted variance in the cuDNN case) to be reused in
4629gradient computation. When is_training is False, a 1D Tensor
4630for the population variance to be reused in both 1st and 2nd
4631order gradient computation.}]>:$reserve_space_2,
4632
4633    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4634    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
4635    DefaultValuedAttr<BoolAttr, "true">:$is_training
4636  );
4637
4638  let results = (outs
4639    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
4640    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
4641    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
4642    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_3,
4643    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
4644in FusedBatchNorm.}]>:$reserve_space_4
4645  );
4646
4647  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4648  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<3>;
4649}
4650
4651def TF_FusedBatchNormGradV3Op : TF_Op<"FusedBatchNormGradV3", [NoSideEffect, TF_LayoutSensitiveInterface]> {
4652  let summary = "Gradient for batch normalization.";
4653
4654  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4656The size of 1D Tensors matches the dimension C of the 4D Tensors.
4657  }];
4658
4659  let arguments = (ins
4660    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to y.}]>:$y_backprop,
4661    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
4662    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4663    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4664mean to be reused in gradient computation. When is_training is
4665False, a 1D Tensor for the population mean to be reused in both
46661st and 2nd order gradient computation.}]>:$reserve_space_1,
4667    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for the computed batch
4668variance (inverted variance in the cuDNN case) to be reused in
4669gradient computation. When is_training is False, a 1D Tensor
4670for the population variance to be reused in both 1st and 2nd
4671order gradient computation.}]>:$reserve_space_2,
4672    Arg<TF_Float32Tensor, [{When is_training is True, a 1D Tensor for some intermediate results to be reused
4673in gradient computation. When is_training is False, a dummy empty Tensor will be
4674created.}]>:$reserve_space_3,
4675
4676    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4677    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NDHWC", "NCDHW"]>, "NHWC">:$data_format,
4678    DefaultValuedAttr<BoolAttr, "true">:$is_training
4679  );
4680
4681  let results = (outs
4682    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for the gradient with respect to x.}]>:$x_backprop,
4683    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to scale.}]>:$scale_backprop,
4684    Res<TF_Float32Tensor, [{A 1D Tensor for the gradient with respect to offset.}]>:$offset_backprop,
4685    Res<TF_Float32Tensor, [{Unused placeholder to match the mean input in FusedBatchNorm.}]>:$reserve_space_4,
4686    Res<TF_Float32Tensor, [{Unused placeholder to match the variance input
4687in FusedBatchNorm.}]>:$reserve_space_5
4688  );
4689
4690  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4691  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<3>;
4692
4693  let extraClassDeclaration = [{
4694    // TF_LayoutSensitiveInterface:
4695    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0, 1}; }
4696    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
4697    StringRef GetOptimalLayout(const RuntimeDevices& devices);
4698    LogicalResult UpdateDataFormat(StringRef data_format);
4699  }];
4700}
4701
4702def TF_FusedBatchNormV2Op : TF_Op<"FusedBatchNormV2", [NoSideEffect, TF_FoldOperandsTransposeInterface, TF_LayoutSensitiveInterface]> {
4703  let summary = "Batch normalization.";
4704
4705  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4707The size of 1D Tensors matches the dimension C of the 4D Tensors.
4708  }];
4709
4710  let arguments = (ins
4711    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
4712    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4713    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
4714    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
4715must be empty for training.}]>:$mean,
4716    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
4717must be empty for training.}]>:$variance,
4718
4719    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4720    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
4721    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
4722    DefaultValuedAttr<BoolAttr, "true">:$is_training
4723  );
4724
4725  let results = (outs
4726    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for output data.}]>:$y,
4727    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
4728to compute the running mean.}]>:$batch_mean,
4729    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
4730TensorFlow to compute the running variance.}]>:$batch_variance,
4731    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
4732in the gradient computation.}]>:$reserve_space_1,
4733    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
4734in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2
4735  );
4736
4737  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4738  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<1>;
4739
4740  let extraClassDeclaration = [{
4741    // TF_FoldOperandsTransposeInterface:
4742    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
4743    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
4744    LogicalResult FoldOperandsPermutation(ArrayRef<int64_t> permutation);
4745
4746    // TF_LayoutSensitiveInterface:
4747    StringRef GetOptimalLayout(const RuntimeDevices& devices);
4748    LogicalResult UpdateDataFormat(StringRef data_format);
4749  }];
4750}
4751
4752def TF_FusedBatchNormV3Op : TF_Op<"FusedBatchNormV3", [NoSideEffect, TF_FoldOperandsTransposeInterface, TF_LayoutSensitiveInterface]> {
4753  let summary = "Batch normalization.";
4754
4755  let description = [{
Note that the size of 4D Tensors is defined by either "NHWC" or "NCHW".
4757The size of 1D Tensors matches the dimension C of the 4D Tensors.
4758  }];
4759
4760  let arguments = (ins
4761    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for input data.}]>:$x,
4762    Arg<TF_Float32Tensor, [{A 1D Tensor for scaling factor, to scale the normalized x.}]>:$scale,
4763    Arg<TF_Float32Tensor, [{A 1D Tensor for offset, to shift to the normalized x.}]>:$offset,
4764    Arg<TF_Float32Tensor, [{A 1D Tensor for population mean. Used for inference only;
4765must be empty for training.}]>:$mean,
4766    Arg<TF_Float32Tensor, [{A 1D Tensor for population variance. Used for inference only;
4767must be empty for training.}]>:$variance,
4768
4769    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
4770    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
4771    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NDHWC", "NCDHW"]>, "NHWC">:$data_format,
4772    DefaultValuedAttr<BoolAttr, "true">:$is_training
4773  );
4774
4775  let results = (outs
4776    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{A 4D Tensor for output data.}]>:$y,
4777    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be used by TensorFlow
4778to compute the running mean.}]>:$batch_mean,
4779    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance, to be used by
4780TensorFlow to compute the running variance.}]>:$batch_variance,
4781    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch mean, to be reused
4782in the gradient computation.}]>:$reserve_space_1,
4783    Res<TF_Float32Tensor, [{A 1D Tensor for the computed batch variance (inverted variance
4784in the cuDNN case), to be reused in the gradient computation.}]>:$reserve_space_2,
4785    Res<TF_Float32Tensor, [{A 1D Tensor for some intermediate results, to be reused in the gradient
4786computation for better efficiency.}]>:$reserve_space_3
4787  );
4788
4789  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
4790  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<1>;
4791
4792  let extraClassDeclaration = [{
4793    // TF_FoldOperandsTransposeInterface:
4794    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
4795    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
4796    LogicalResult FoldOperandsPermutation(ArrayRef<int64_t> permutation);
4797
4798    // TF_LayoutSensitiveInterface:
4799    StringRef GetOptimalLayout(const RuntimeDevices& devices);
4800    LogicalResult UpdateDataFormat(StringRef data_format);
4801  }];
4802}
4803
4804def TF_GatherOp : TF_Op<"Gather", [NoSideEffect]> {
4805  let summary = "Gather slices from `params` according to `indices`.";
4806
4807  let description = [{
4808`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
4809Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
4810
4811```python
4812    # Scalar indices
4813    output[:, ..., :] = params[indices, :, ... :]
4814
4815    # Vector indices
4816    output[i, :, ..., :] = params[indices[i], :, ... :]
4817
4818    # Higher rank indices
4819    output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
4820```
4821
4822If `indices` is a permutation and `len(indices) == params.shape[0]` then
4823this operation will permute `params` accordingly.
4824
4825`validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in
4826`indices` are always validated to be within range. If assigned to GPU,
4827out-of-bound indices result in safe but unspecified behavior, which may include
4828raising an error.
4829
4830<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
4831<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
4832</div>
4833  }];
4834
4835  let arguments = (ins
4836    TF_Tensor:$params,
4837    TF_I32OrI64Tensor:$indices,
4838
4839    DefaultValuedAttr<BoolAttr, "true">:$validate_indices
4840  );
4841
4842  let results = (outs
4843    TF_Tensor:$output
4844  );
4845
4846  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
4847  TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
4848}
4849
4850def TF_GatherNdOp : TF_Op<"GatherNd", [NoSideEffect]> {
4851  let summary = [{
4852Gather slices from `params` into a Tensor with shape specified by `indices`.
4853  }];
4854
4855  let description = [{
4856`indices` is a K-dimensional integer tensor, best thought of as a
4857(K-1)-dimensional tensor of indices into `params`, where each element defines a
4858slice of `params`:
4859
4860    output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
4861
4862Whereas in `tf.gather` `indices` defines slices into the `axis`
4863dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the
4864first `N` dimensions of `params`, where `N = indices.shape[-1]`.
4865
4866The last dimension of `indices` can be at most the rank of
4867`params`:
4868
4869    indices.shape[-1] <= params.rank
4870
4871The last dimension of `indices` corresponds to elements
4872(if `indices.shape[-1] == params.rank`) or slices
4873(if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]`
4874of `params`.  The output tensor has shape
4875
4876    indices.shape[:-1] + params.shape[indices.shape[-1]:]
4877
Note that on CPU, if an out-of-bound index is found, an error is returned.
On GPU, if an out-of-bound index is found, a 0 is stored in the
corresponding output value.
4881
Some examples follow.
4883
4884Simple indexing into a matrix:
4885
4886```python
4887    indices = [[0, 0], [1, 1]]
4888    params = [['a', 'b'], ['c', 'd']]
4889    output = ['a', 'd']
4890```
4891
4892Slice indexing into a matrix:
4893
4894```python
4895    indices = [[1], [0]]
4896    params = [['a', 'b'], ['c', 'd']]
4897    output = [['c', 'd'], ['a', 'b']]
4898```
4899
4900Indexing into a 3-tensor:
4901
4902```python
4903    indices = [[1]]
4904    params = [[['a0', 'b0'], ['c0', 'd0']],
4905              [['a1', 'b1'], ['c1', 'd1']]]
4906    output = [[['a1', 'b1'], ['c1', 'd1']]]
4907
4908
4909    indices = [[0, 1], [1, 0]]
4910    params = [[['a0', 'b0'], ['c0', 'd0']],
4911              [['a1', 'b1'], ['c1', 'd1']]]
4912    output = [['c0', 'd0'], ['a1', 'b1']]
4913
4914
4915    indices = [[0, 0, 1], [1, 0, 1]]
4916    params = [[['a0', 'b0'], ['c0', 'd0']],
4917              [['a1', 'b1'], ['c1', 'd1']]]
4918    output = ['b0', 'b1']
4919```
4920
4921Batched indexing into a matrix:
4922
4923```python
4924    indices = [[[0, 0]], [[0, 1]]]
4925    params = [['a', 'b'], ['c', 'd']]
4926    output = [['a'], ['b']]
4927```
4928
4929Batched slice indexing into a matrix:
4930
4931```python
4932    indices = [[[1]], [[0]]]
4933    params = [['a', 'b'], ['c', 'd']]
4934    output = [[['c', 'd']], [['a', 'b']]]
4935```
4936
4937Batched indexing into a 3-tensor:
4938
4939```python
4940    indices = [[[1]], [[0]]]
4941    params = [[['a0', 'b0'], ['c0', 'd0']],
4942              [['a1', 'b1'], ['c1', 'd1']]]
4943    output = [[[['a1', 'b1'], ['c1', 'd1']]],
4944              [[['a0', 'b0'], ['c0', 'd0']]]]
4945
4946    indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]]
4947    params = [[['a0', 'b0'], ['c0', 'd0']],
4948              [['a1', 'b1'], ['c1', 'd1']]]
4949    output = [[['c0', 'd0'], ['a1', 'b1']],
4950              [['a0', 'b0'], ['c1', 'd1']]]
4951
4952
4953    indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]]
4954    params = [[['a0', 'b0'], ['c0', 'd0']],
4955              [['a1', 'b1'], ['c1', 'd1']]]
4956    output = [['b0', 'b1'], ['d0', 'c1']]
4957```
4958
4959See also `tf.gather` and `tf.batch_gather`.
4960  }];
4961
4962  let arguments = (ins
4963    Arg<TF_Tensor, [{The tensor from which to gather values.}]>:$params,
4964    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices
4965  );
4966
4967  let results = (outs
4968    Res<TF_Tensor, [{Values from `params` gathered from indices given by `indices`, with
4969shape `indices.shape[:-1] + params.shape[indices.shape[-1]:]`.}]>:$output
4970  );
4971
4972  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
4973  TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
4974}
4975
4976def TF_GatherV2Op : TF_Op<"GatherV2", [NoSideEffect]> {
4977  let summary = [{
4978Gather slices from `params` axis `axis` according to `indices`.
4979  }];
4980
4981  let description = [{
4982`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
4983Produces an output tensor with shape `params.shape[:axis] +
4984indices.shape[batch_dims:] + params.shape[axis + 1:]` where:
4985
4986```python
4987    # Scalar indices (output is rank(params) - 1).
4988    output[a_0, ..., a_n, b_0, ..., b_n] =
4989      params[a_0, ..., a_n, indices, b_0, ..., b_n]
4990
4991    # Vector indices (output is rank(params)).
4992    output[a_0, ..., a_n, i, b_0, ..., b_n] =
4993      params[a_0, ..., a_n, indices[i], b_0, ..., b_n]
4994
4995    # Higher rank indices (output is rank(params) + rank(indices) - 1).
4996    output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] =
4997      params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n]
4998```
4999
5000<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
5001<img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt>
5002</div>
5003
Note that on CPU, if an out-of-bound index is found, an error is returned.
On GPU, if an out-of-bound index is found, a 0 is stored in the
corresponding output value.
5007
5008See also `tf.batch_gather` and `tf.gather_nd`.
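
For instance, gathering along `axis=1` (a sketch using the `tf.gather`
wrapper):

```python
import tensorflow as tf

params = tf.constant([[0, 1, 2],
                      [3, 4, 5]])
print(tf.gather(params, indices=[2, 0], axis=1))
# [[2, 0],
#  [5, 3]]
```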
5009  }];
5010
5011  let arguments = (ins
5012    Arg<TF_Tensor, [{The tensor from which to gather values. Must be at least rank
5013`axis + 1`.}]>:$params,
5014    Arg<TF_I32OrI64Tensor, [{Index tensor. Must be in range `[0, params.shape[axis])`.}]>:$indices,
5015    Arg<TF_I32OrI64Tensor, [{The axis in `params` to gather `indices` from. Defaults to the first
5016dimension. Supports negative indexes.}]>:$axis,
5017
5018    DefaultValuedAttr<I64Attr, "0">:$batch_dims
5019  );
5020
5021  let results = (outs
5022    Res<TF_Tensor, [{Values from `params` gathered from indices given by `indices`, with
5023shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]`.}]>:$output
5024  );
5025
5026  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
5027  TF_DerivedOperandTypeAttr Tparams = TF_DerivedOperandTypeAttr<0>;
5028  TF_DerivedOperandTypeAttr Taxis = TF_DerivedOperandTypeAttr<2>;
5029
5030  let verifier = [{
5031    return Verify(*this);
5032  }];
5033}
5034
5035def TF_GreaterOp : TF_Op<"Greater", [NoSideEffect, ResultsBroadcastableShape]>,
5036                   WithBroadcastableCmpOpBuilder {
5037  let summary = "Returns the truth value of (x > y) element-wise.";
5038
5039  let description = [{
5040*NOTE*: `Greater` supports broadcasting. More about broadcasting
5041[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
5042
5043Example:
5044
5045```python
5046x = tf.constant([5, 4, 6])
5047y = tf.constant([5, 2, 5])
5048tf.math.greater(x, y) ==> [False, True, True]
5049
5050x = tf.constant([5, 4, 6])
5051y = tf.constant([5])
5052tf.math.greater(x, y) ==> [False, False, True]
5053```
5054  }];
5055
5056  let arguments = (ins
5057    TF_IntOrFpTensor:$x,
5058    TF_IntOrFpTensor:$y
5059  );
5060
5061  let results = (outs
5062    TF_BoolTensor:$z
5063  );
5064
5065  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5066}
5067
5068def TF_GreaterEqualOp : TF_Op<"GreaterEqual", [NoSideEffect, ResultsBroadcastableShape]>,
5069                        WithBroadcastableCmpOpBuilder {
5070  let summary = "Returns the truth value of (x >= y) element-wise.";
5071
5072  let description = [{
5073*NOTE*: `GreaterEqual` supports broadcasting. More about broadcasting
5074[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
5075
5076Example:
5077
5078```python
5079x = tf.constant([5, 4, 6, 7])
5080y = tf.constant([5, 2, 5, 10])
5081tf.math.greater_equal(x, y) ==> [True, True, True, False]
5082
5083x = tf.constant([5, 4, 6, 7])
5084y = tf.constant([5])
5085tf.math.greater_equal(x, y) ==> [True, False, True, True]
5086```
5087  }];
5088
5089  let arguments = (ins
5090    TF_IntOrFpTensor:$x,
5091    TF_IntOrFpTensor:$y
5092  );
5093
5094  let results = (outs
5095    TF_BoolTensor:$z
5096  );
5097
5098  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5099}
5100
5101def TF_HSVToRGBOp : TF_Op<"HSVToRGB", [NoSideEffect]> {
5102  let summary = "Convert one or more images from HSV to RGB.";
5103
5104  let description = [{
5105Outputs a tensor of the same shape as the `images` tensor, containing the RGB
value of the pixels. The output is only well defined if the values in `images`
are in `[0,1]`.
5108
5109See `rgb_to_hsv` for a description of the HSV encoding.
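
For example, a sketch via the `tf.image.hsv_to_rgb` wrapper (hue 0 with full
saturation and value is pure red):

```python
import tensorflow as tf

hsv = tf.constant([[[0.0, 1.0, 1.0]]])   # shape [1, 1, 3]
print(tf.image.hsv_to_rgb(hsv))          # [[[1., 0., 0.]]]
```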
5110  }];
5111
5112  let arguments = (ins
5113    Arg<TF_FloatTensor, [{1-D or higher rank. HSV data to convert. Last dimension must be size 3.}]>:$images
5114  );
5115
5116  let results = (outs
5117    Res<TF_FloatTensor, [{`images` converted to RGB.}]>:$output
5118  );
5119
5120  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5121}
5122
5123def TF_HashTableV2Op : TF_Op<"HashTableV2", []> {
5124  let summary = "Creates a non-initialized hash table.";
5125
5126  let description = [{
5127This op creates a hash table, specifying the type of its keys and values.
5128Before using the table you will have to initialize it.  After initialization the
5129table will be immutable.
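
A sketch of the create-initialize-lookup flow, assuming the
`tf.lookup.StaticHashTable` wrapper builds on this op:

```python
import tensorflow as tf

keys = tf.constant(["a", "b"])
values = tf.constant([1, 2], dtype=tf.int64)
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1)
print(table.lookup(tf.constant(["b", "missing"])))   # [2, -1]
```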
5130  }];
5131
5132  let arguments = (ins
5133    StrAttr:$container,
5134    StrAttr:$shared_name,
5135    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
5136    TypeAttr:$key_dtype,
5137    TypeAttr:$value_dtype
5138  );
5139
5140  let results = (outs
5141    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
5142  );
5143}
5144
5145def TF_IFFTOp : TF_Op<"IFFT", [NoSideEffect]> {
5146  let summary = "Inverse fast Fourier transform.";
5147
5148  let description = [{
5149Computes the inverse 1-dimensional discrete Fourier transform over the
5150inner-most dimension of `input`.
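
A round-trip sketch using the `tf.signal` wrappers (assumed to lower to
`FFT`/`IFFT`):

```python
import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0, 4.0], dtype=tf.complex64)
print(tf.signal.ifft(tf.signal.fft(x)))   # ~[1., 2., 3., 4.] (complex)
```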
5151  }];
5152
5153  let arguments = (ins
5154    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
5155  );
5156
5157  let results = (outs
5158    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most
5159  dimension of `input` is replaced with its inverse 1D Fourier transform.
5160
5161@compatibility(numpy)
5162Equivalent to np.fft.ifft
5163@end_compatibility}]>:$output
5164  );
5165
5166  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5167}
5168
5169def TF_IFFT2DOp : TF_Op<"IFFT2D", [NoSideEffect]> {
5170  let summary = "Inverse 2D fast Fourier transform.";
5171
5172  let description = [{
5173Computes the inverse 2-dimensional discrete Fourier transform over the
5174inner-most 2 dimensions of `input`.
5175  }];
5176
5177  let arguments = (ins
5178    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
5179  );
5180
5181  let results = (outs
5182    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 2
5183  dimensions of `input` are replaced with their inverse 2D Fourier transform.
5184
5185@compatibility(numpy)
5186Equivalent to np.fft.ifft2
5187@end_compatibility}]>:$output
5188  );
5189
5190  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5191}
5192
5193def TF_IFFT3DOp : TF_Op<"IFFT3D", [NoSideEffect]> {
5194  let summary = "Inverse 3D fast Fourier transform.";
5195
5196  let description = [{
5197Computes the inverse 3-dimensional discrete Fourier transform over the
5198inner-most 3 dimensions of `input`.
5199  }];
5200
5201  let arguments = (ins
5202    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input
5203  );
5204
5205  let results = (outs
5206    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor of the same shape as `input`. The inner-most 3
5207  dimensions of `input` are replaced with their inverse 3D Fourier transform.
5208
5209@compatibility(numpy)
5210Equivalent to np.fft.ifftn with 3 dimensions.
5211@end_compatibility}]>:$output
5212  );
5213
5214  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5215}
5216
5217def TF_IRFFTOp : TF_Op<"IRFFT", [NoSideEffect]> {
5218  let summary = "Inverse real-valued fast Fourier transform.";
5219
5220  let description = [{
5221Computes the inverse 1-dimensional discrete Fourier transform of a real-valued
5222signal over the inner-most dimension of `input`.
5223
5224The inner-most dimension of `input` is assumed to be the result of `RFFT`: the
5225`fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If
5226`fft_length` is not provided, it is computed from the size of the inner-most
5227dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to
5228compute `input` is odd, it should be provided since it cannot be inferred
5229properly.
5230
5231Along the axis `IRFFT` is computed on, if `fft_length / 2 + 1` is smaller
5232than the corresponding dimension of `input`, the dimension is cropped. If it is
5233larger, the dimension is padded with zeros.
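
For example, an `RFFT`/`IRFFT` round trip for an even-length signal (a sketch
with the `tf.signal` wrappers):

```python
import tensorflow as tf

signal = tf.constant([1.0, 2.0, 3.0, 4.0])
spectrum = tf.signal.rfft(signal)    # 4 / 2 + 1 = 3 unique components
print(tf.signal.irfft(spectrum))     # fft_length inferred as 4; ~[1., 2., 3., 4.]
```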
5234  }];
5235
5236  let arguments = (ins
5237    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
5238    Arg<TF_Int32Tensor, [{An int32 tensor of shape [1]. The FFT length.}]>:$fft_length
5239  );
5240
5241  let results = (outs
5242    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most
5243  dimension of `input` is replaced with the `fft_length` samples of its inverse
5244  1D Fourier transform.
5245
5246@compatibility(numpy)
5247Equivalent to np.fft.irfft
5248@end_compatibility}]>:$output
5249  );
5250
5251  TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
5252  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5253}
5254
5255def TF_IRFFT2DOp : TF_Op<"IRFFT2D", [NoSideEffect]> {
5256  let summary = "Inverse 2D real-valued fast Fourier transform.";
5257
5258  let description = [{
5259Computes the inverse 2-dimensional discrete Fourier transform of a real-valued
5260signal over the inner-most 2 dimensions of `input`.
5261
5262The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`:
5263The inner-most dimension contains the `fft_length / 2 + 1` unique components of
5264the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
5265from the size of the inner-most 2 dimensions of `input`. If the FFT length used
5266to compute `input` is odd, it should be provided since it cannot be inferred
5267properly.
5268
5269Along each axis `IRFFT2D` is computed on, if `fft_length` (or
5270`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
5271corresponding dimension of `input`, the dimension is cropped. If it is larger,
5272the dimension is padded with zeros.
5273  }];
5274
5275  let arguments = (ins
5276    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
5277    Arg<TF_Int32Tensor, [{An int32 tensor of shape [2]. The FFT length for each dimension.}]>:$fft_length
5278  );
5279
5280  let results = (outs
5281    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most 2
5282  dimensions of `input` are replaced with the `fft_length` samples of their
5283  inverse 2D Fourier transform.
5284
5285@compatibility(numpy)
5286Equivalent to np.fft.irfft2
5287@end_compatibility}]>:$output
5288  );
5289
5290  TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
5291  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5292}
5293
5294def TF_IRFFT3DOp : TF_Op<"IRFFT3D", [NoSideEffect]> {
5295  let summary = "Inverse 3D real-valued fast Fourier transform.";
5296
5297  let description = [{
5298Computes the inverse 3-dimensional discrete Fourier transform of a real-valued
5299signal over the inner-most 3 dimensions of `input`.
5300
5301The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`:
5302The inner-most dimension contains the `fft_length / 2 + 1` unique components of
5303the DFT of a real-valued signal. If `fft_length` is not provided, it is computed
5304from the size of the inner-most 3 dimensions of `input`. If the FFT length used
5305to compute `input` is odd, it should be provided since it cannot be inferred
5306properly.
5307
5308Along each axis `IRFFT3D` is computed on, if `fft_length` (or
5309`fft_length / 2 + 1` for the inner-most dimension) is smaller than the
5310corresponding dimension of `input`, the dimension is cropped. If it is larger,
5311the dimension is padded with zeros.
5312  }];
5313
5314  let arguments = (ins
5315    Arg<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex tensor.}]>:$input,
5316    Arg<TF_Int32Tensor, [{An int32 tensor of shape [3]. The FFT length for each dimension.}]>:$fft_length
5317  );
5318
5319  let results = (outs
5320    Res<TF_F32OrF64Tensor, [{A float32 tensor of the same rank as `input`. The inner-most 3
5321  dimensions of `input` are replaced with the `fft_length` samples of their
5322  inverse 3D real Fourier transform.
5323
5324@compatibility(numpy)
Equivalent to np.fft.irfftn with 3 dimensions.
5326@end_compatibility}]>:$output
5327  );
5328
5329  TF_DerivedResultTypeAttr Treal = TF_DerivedResultTypeAttr<0>;
5330  TF_DerivedOperandTypeAttr Tcomplex = TF_DerivedOperandTypeAttr<0>;
5331}
5332
5333def TF_IdentityNOp : TF_Op<"IdentityN", [NoSideEffect]> {
5334  let summary = [{
5335Returns a list of tensors with the same shapes and contents as the input
5336  }];
5337
5338  let description = [{
5339tensors.
5340
5341This op can be used to override the gradient for complicated functions. For
5342example, suppose y = f(x) and we wish to apply a custom function g for backprop
5343such that dx = g(dy). In Python,
5344
5345```python
5346with tf.get_default_graph().gradient_override_map(
5347    {'IdentityN': 'OverrideGradientWithG'}):
5348  y, _ = identity_n([f(x), x])
5349
5350@tf.RegisterGradient('OverrideGradientWithG')
5351def ApplyG(op, dy, _):
5352  return [None, g(dy)]  # Do not backprop to f(x).
5353```
5354  }];
5355
5356  let arguments = (ins
5357    Variadic<TF_Tensor>:$input
5358  );
5359
5360  let results = (outs
5361    Variadic<TF_Tensor>:$output
5362  );
5363
5364  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<0>;
5365}
5366
5367def TF_IgammaOp : TF_Op<"Igamma", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
5368                  WithBroadcastableBinOpBuilder {
5369  let summary = [{
5370Compute the lower regularized incomplete Gamma function `P(a, x)`.
5371  }];
5372
5373  let description = [{
5374The lower regularized incomplete Gamma function is defined as:
5375
5376
5377\\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)
5378
5379where
5380
5381\\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\)
5382
5383is the lower incomplete Gamma function.
5384
Note, above `Q(a, x)` (`Igammac`) is the upper regularized incomplete
Gamma function.
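
As a quick numeric check (a sketch with the `tf.math.igamma` wrapper): for
`a = 1`, `P(1, x) = 1 - exp(-x)`.

```python
import tensorflow as tf

a = tf.constant(1.0)
x = tf.constant([0.5, 1.0])
print(tf.math.igamma(a, x))   # ~[0.3935, 0.6321] == 1 - exp(-x)
```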
5387  }];
5388
5389  let arguments = (ins
5390    TF_F32OrF64Tensor:$a,
5391    TF_F32OrF64Tensor:$x
5392  );
5393
5394  let results = (outs
5395    TF_F32OrF64Tensor:$z
5396  );
5397
5398  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5399}
5400
5401def TF_IgammaGradAOp : TF_Op<"IgammaGradA", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
5402                       WithBroadcastableBinOpBuilder {
5403  let summary = "Computes the gradient of `igamma(a, x)` wrt `a`.";
5404
5405  let arguments = (ins
5406    TF_F32OrF64Tensor:$a,
5407    TF_F32OrF64Tensor:$x
5408  );
5409
5410  let results = (outs
5411    TF_F32OrF64Tensor:$z
5412  );
5413
5414  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5415}
5416
5417def TF_IgammacOp : TF_Op<"Igammac", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
5418                   WithBroadcastableBinOpBuilder {
5419  let summary = [{
5420Compute the upper regularized incomplete Gamma function `Q(a, x)`.
5421  }];
5422
5423  let description = [{
5424The upper regularized incomplete Gamma function is defined as:
5425
5426\\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)
5427
5428where
5429
\\(Gamma(a, x) = \\int_{x}^{\\infty} t^{a-1} exp(-t) dt\\)
5431
is the upper incomplete Gamma function.
5433
Note, above `P(a, x)` (`Igamma`) is the lower regularized incomplete
Gamma function.
5436  }];
5437
5438  let arguments = (ins
5439    TF_F32OrF64Tensor:$a,
5440    TF_F32OrF64Tensor:$x
5441  );
5442
5443  let results = (outs
5444    TF_F32OrF64Tensor:$z
5445  );
5446
5447  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5448}
5449
5450def TF_ImagOp : TF_Op<"Imag", [NoSideEffect, SameOperandsAndResultShape]> {
5451  let summary = "Returns the imaginary part of a complex number.";
5452
5453  let description = [{
5454Given a tensor `input` of complex numbers, this operation returns a tensor of
5455type `float` that is the imaginary part of each element in `input`. All
5456elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
5457is the real part and *b* is the imaginary part returned by this operation.
5458
5459For example:
5460
5461```
5462# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
5463tf.imag(input) ==> [4.75, 5.75]
5464```
5465  }];
5466
5467  let arguments = (ins
5468    TensorOf<[TF_Complex128, TF_Complex64]>:$input
5469  );
5470
5471  let results = (outs
5472    TF_F32OrF64Tensor:$output
5473  );
5474
5475  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5476  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
5477}
5478
5479def TF_InTopKV2Op : TF_Op<"InTopKV2", [NoSideEffect]> {
5480  let summary = "Says whether the targets are in the top `K` predictions.";
5481
5482  let description = [{
5483This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the
5484prediction for the target class is among the top `k` predictions among
5485all predictions for example `i`. Note that the behavior of `InTopK` differs
5486from the `TopK` op in its handling of ties; if multiple classes have the
5487same prediction value and straddle the top-`k` boundary, all of those
5488classes are considered to be in the top `k`.
5489
5490More formally, let
5491
5492  \\(predictions_i\\) be the predictions for all classes for example `i`,
5493  \\(targets_i\\) be the target class for example `i`,
5494  \\(out_i\\) be the output for example `i`,
5495
5496$$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$
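
For example, a sketch via the `tf.math.in_top_k` wrapper:

```python
import tensorflow as tf

predictions = tf.constant([[0.1, 0.8, 0.1],
                           [0.3, 0.3, 0.4]])
targets = tf.constant([1, 0])
print(tf.math.in_top_k(targets, predictions, k=1))   # [True, False]
```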
5497  }];
5498
5499  let arguments = (ins
5500    Arg<TF_Float32Tensor, [{A `batch_size` x `classes` tensor.}]>:$predictions,
5501    Arg<TF_I32OrI64Tensor, [{A `batch_size` vector of class ids.}]>:$targets,
5502    Arg<TF_I32OrI64Tensor, [{Number of top elements to look at for computing precision.}]>:$k
5503  );
5504
5505  let results = (outs
5506    Res<TF_BoolTensor, [{Computed precision at `k` as a `bool Tensor`.}]>:$precision
5507  );
5508
5509  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
5510}
5511
5512def TF_InfeedDequeueOp : TF_Op<"InfeedDequeue", []> {
5513  let summary = [{
5514A placeholder op for a value that will be fed into the computation.
5515  }];
5516
5517  let arguments = (ins
5518    TF_ShapeAttr:$shape
5519  );
5520
5521  let results = (outs
5522    Res<TF_Tensor, [{A tensor that will be provided using the infeed mechanism.}]>:$output
5523  );
5524
5525  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
5526}
5527
5528def TF_InitializeTableV2Op : TF_Op<"InitializeTableV2", []> {
5529  let summary = [{
5530Table initializer that takes two tensors for keys and values respectively.
5531  }];
5532
5533  let arguments = (ins
5534    Arg<TF_ResourceTensor, [{Handle to a table which will be initialized.}], [TF_LookupTableWrite]>:$table_handle,
5535    Arg<TF_Tensor, [{Keys of type Tkey.}]>:$keys,
5536    Arg<TF_Tensor, [{Values of type Tval.}]>:$values
5537  );
5538
5539  let results = (outs);
5540
5541  TF_DerivedOperandTypeAttr Tval = TF_DerivedOperandTypeAttr<2>;
5542  TF_DerivedOperandTypeAttr Tkey = TF_DerivedOperandTypeAttr<1>;
5543}
5544
5545def TF_InplaceAddOp : TF_Op<"InplaceAdd", [NoSideEffect, TF_AllTypesMatch<["x", "y"]>]> {
5546  let summary = "Adds v into specified rows of x.";
5547
5548  let description = [{
Computes `y = x; y[i, :] += v; return y`.
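
A minimal sketch, assuming the raw-op binding `tf.raw_ops.InplaceAdd`:

```python
import tensorflow as tf

x = tf.ones([3, 2])
y = tf.raw_ops.InplaceAdd(x=x, i=tf.constant([0, 2]),
                          v=tf.fill([2, 2], 10.0))
# Rows 0 and 2 are incremented by 10; row 1 is untouched.
```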
5550  }];
5551
5552  let arguments = (ins
5553    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$x,
5554    Arg<TF_Int32Tensor, [{A vector. Indices into the left-most dimension of `x`.}]>:$i,
5555    Arg<TF_Tensor, [{A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.}]>:$v
5556  );
5557
5558  let results = (outs
5559    Res<TF_Tensor, [{A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.}]>:$y
5560  );
5561
5562  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5563}
5564
5565def TF_InplaceUpdateOp : TF_Op<"InplaceUpdate", [NoSideEffect]> {
5566  let summary = "Updates specified rows 'i' with values 'v'.";
5567
5568  let description = [{
5569Computes `x[i, :] = v; return x`.
5570
Originally this function was mutative; however, for compilation we make this
operation create and operate on a copy of `x`.
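
A minimal sketch, assuming the raw-op binding `tf.raw_ops.InplaceUpdate`:

```python
import tensorflow as tf

x = tf.zeros([3, 2])
y = tf.raw_ops.InplaceUpdate(x=x, i=tf.constant([1]),
                             v=tf.constant([[5.0, 6.0]]))
# y -> [[0, 0], [5, 6], [0, 0]]; x itself is left unchanged when compiled.
```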
5573  }];
5574
5575  let arguments = (ins
5576    Arg<TF_Tensor, [{A tensor of type `T`.}]>:$x,
5577    Arg<TF_Int32Tensor, [{A vector. Indices into the left-most dimension of `x`.}]>:$i,
5578    Arg<TF_Tensor, [{A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size.}]>:$v
5579  );
5580
5581  let results = (outs
5582    Res<TF_Tensor, [{A `Tensor` of type T. An alias of `x`. The content of `y` is undefined if there are duplicates in `i`.}]>:$y
5583  );
5584
5585  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5586}
5587
5588def TF_InvOp : TF_Op<"Inv", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
5589  let summary = "Computes the reciprocal of x element-wise.";
5590
5591  let description = [{
5592I.e., \\(y = 1 / x\\).
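
For example, a one-line sketch via the `tf.math.reciprocal` wrapper:

```python
import tensorflow as tf

print(tf.math.reciprocal(tf.constant([2.0, 4.0])))   # [0.5, 0.25]
```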
5593  }];
5594
5595  let arguments = (ins
5596    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
5597  );
5598
5599  let results = (outs
5600    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
5601  );
5602
5603  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5604}
5605
5606def TF_InvertOp : TF_Op<"Invert", [Involution, NoSideEffect, SameOperandsAndResultType]> {
5607  let summary = [{
5608Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes 10101010.
5609  }];
5610
5611  let description = [{
5612Flip each bit of supported types.  For example, type `int8` (decimal 2) binary 00000010 becomes (decimal -3) binary 11111101.
5613This operation is performed on each element of the tensor argument `x`.
5614
5615Example:
5616```python
5617import tensorflow as tf
5618from tensorflow.python.ops import bitwise_ops
5619
5620# flip 2 (00000010) to -3 (11111101)
5621tf.assert_equal(-3, bitwise_ops.invert(2))
5622
5623dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64,
5624              dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64]
5625
5626inputs = [0, 5, 3, 14]
5627for dtype in dtype_list:
5628  # Because of issues with negative numbers, let's test this indirectly.
5629  # 1. invert(a) and a = 0
5630  # 2. invert(a) or a = invert(0)
5631  input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)
5632  not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and(
5633                                      input_tensor, bitwise_ops.invert(input_tensor)),
5634                                    bitwise_ops.bitwise_or(
5635                                      input_tensor, bitwise_ops.invert(input_tensor)),
5636                                    bitwise_ops.invert(
5637                                      tf.constant(0, dtype=dtype))]
5638
5639  expected = tf.constant([0, 0, 0, 0], dtype=tf.float32)
5640  tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected)
5641
5642  expected = tf.cast([not_0] * 4, tf.float32)
5643  tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected)
5644
5645  # For unsigned dtypes let's also check the result directly.
5646  if dtype.is_unsigned:
5647    inverted = bitwise_ops.invert(input_tensor)
5648    expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32)
5649    tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32))
5650```
5651  }];
5652
5653  let arguments = (ins
5654    TF_IntTensor:$x
5655  );
5656
5657  let results = (outs
5658    TF_IntTensor:$y
5659  );
5660
5661  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5662}
5663
5664def TF_InvertPermutationOp : TF_Op<"InvertPermutation", [NoSideEffect]> {
5665  let summary = "Computes the inverse permutation of a tensor.";
5666
5667  let description = [{
5668This operation computes the inverse of an index permutation. It takes a 1-D
5669integer tensor `x`, which represents the indices of a zero-based array, and
5670swaps each value with its index position. In other words, for an output tensor
5671`y` and an input tensor `x`, this operation computes the following:
5672
5673`y[x[i]] = i for i in [0, 1, ..., len(x) - 1]`
5674
5675The values must include 0. There can be no duplicate values or negative values.
5676
5677For example:
5678
5679```
5680# tensor `x` is [3, 4, 0, 2, 1]
5681invert_permutation(x) ==> [2, 4, 3, 0, 1]
5682```
5683  }];
5684
5685  let arguments = (ins
5686    Arg<TF_I32OrI64Tensor, [{1-D.}]>:$x
5687  );
5688
5689  let results = (outs
5690    Res<TF_I32OrI64Tensor, [{1-D.}]>:$y
5691  );
5692
5693  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5694
5695  let verifier = [{
5696    return Verify(*this);
5697  }];
5698}
5699
5700def TF_IsFiniteOp : TF_Op<"IsFinite", [NoSideEffect, SameOperandsAndResultShape]> {
5701  let summary = "Returns which elements of x are finite.";
5702
5703  let description = [{
5704@compatibility(numpy)
5705Equivalent to np.isfinite
5706@end_compatibility
5707
5708Example:
5709
5710```python
5711x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan])
5712tf.math.is_finite(x) ==> [True, True, True, False, False]
5713```
5714  }];
5715
5716  let arguments = (ins
5717    TF_FloatTensor:$x
5718  );
5719
5720  let results = (outs
5721    TF_BoolTensor:$y
5722  );
5723
5724  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5725}
5726
5727def TF_IsInfOp : TF_Op<"IsInf", [NoSideEffect, SameOperandsAndResultShape]> {
5728  let summary = "Returns which elements of x are Inf.";
5729
5730  let description = [{
5731@compatibility(numpy)
5732Equivalent to np.isinf
5733@end_compatibility
5734
5735Example:
5736
5737```python
5738x = tf.constant([5.0, np.inf, 6.8, np.inf])
5739tf.math.is_inf(x) ==> [False, True, False, True]
5740```
5741  }];
5742
5743  let arguments = (ins
5744    TF_FloatTensor:$x
5745  );
5746
5747  let results = (outs
5748    TF_BoolTensor:$y
5749  );
5750
5751  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5752}
5753
5754def TF_IsNanOp : TF_Op<"IsNan", [NoSideEffect, SameOperandsAndResultShape]> {
5755  let summary = "Returns which elements of x are NaN.";
5756
5757  let description = [{
5758@compatibility(numpy)
5759Equivalent to np.isnan
5760@end_compatibility
5761
5762Example:
5763
5764```python
5765x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf])
5766tf.math.is_nan(x) ==> [False, True, False, True, False]
5767```
5768  }];
5769
5770  let arguments = (ins
5771    TF_FloatTensor:$x
5772  );
5773
5774  let results = (outs
5775    TF_BoolTensor:$y
5776  );
5777
5778  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5779}
5780
5781def TF_IteratorOp : TF_Op<"Iterator", []> {
5782  let summary = "A container for an iterator resource.";
5783
5784  let arguments = (ins
5785    StrAttr:$shared_name,
5786    StrAttr:$container,
5787    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
5788    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
5789  );
5790
5791  let results = (outs
5792    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator"
5793or "IteratorGetNext" op.}], [TF_DatasetIteratorAlloc]>:$handle
5794  );
5795}
5796
5797def TF_IteratorFromStringHandleOp : TF_Op<"IteratorFromStringHandle", []> {
5798  let summary = [{
5799Converts the given string representing a handle to an iterator to a resource.
5800  }];
5801
5802  let arguments = (ins
5803    Arg<TF_StrTensor, [{A string representation of the given handle.}]>:$string_handle,
5804
5805    DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
5806    DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
5807  );
5808
5809  let results = (outs
5810    Res<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorAlloc]>:$resource_handle
5811  );
5812}
5813
5814def TF_IteratorFromStringHandleV2Op : TF_Op<"IteratorFromStringHandleV2", []> {
5815  let summary = "";
5816
5817  let arguments = (ins
5818    TF_StrTensor:$string_handle,
5819
5820    DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
5821    DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
5822  );
5823
5824  let results = (outs
5825    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$resource_handle
5826  );
5827}
5828
5829def TF_IteratorGetNextOp : TF_Op<"IteratorGetNext", []> {
5830  let summary = "Gets the next output from the given iterator .";
5831
5832  let arguments = (ins
5833    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator
5834  );
5835
5836  let results = (outs
5837    Variadic<TF_Tensor>:$components
5838  );
5839
5840  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
5841  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
5842}
5843
5844def TF_IteratorGetNextAsOptionalOp : TF_Op<"IteratorGetNextAsOptional", []> {
5845  let summary = [{
5846Gets the next output from the given iterator as an Optional variant.
5847  }];
5848
5849  let arguments = (ins
5850    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator,
5851
5852    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
5853    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
5854  );
5855
5856  let results = (outs
5857    TF_VariantTensor:$optional
5858  );
5859}
5860
5861def TF_IteratorGetNextSyncOp : TF_Op<"IteratorGetNextSync", []> {
5862  let summary = "Gets the next output from the given iterator.";
5863
5864  let description = [{
This operation is a synchronous version of IteratorGetNext. It should only be used
5866in situations where the iterator does not block the calling thread, or where
5867the calling thread is not a member of the thread pool used to execute parallel
5868operations (e.g. in eager mode).
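
A sketch of the typical lowering path from the `tf.data` Python API, assuming
TF1-style graph mode:

```python
import tensorflow as tf

ds = tf.data.Dataset.range(3)
it = tf.compat.v1.data.make_one_shot_iterator(ds)
next_elem = it.get_next()   # lowers to an IteratorGetNext-style op
```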
5869  }];
5870
5871  let arguments = (ins
5872    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$iterator
5873  );
5874
5875  let results = (outs
5876    Variadic<TF_Tensor>:$components
5877  );
5878
5879  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
5880  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
5881}
5882
5883def TF_IteratorToStringHandleOp : TF_Op<"IteratorToStringHandle", []> {
5884  let summary = [{
5885Converts the given `resource_handle` representing an iterator to a string.
5886  }];
5887
5888  let arguments = (ins
5889    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorRead]>:$resource_handle
5890  );
5891
5892  let results = (outs
5893    Res<TF_StrTensor, [{A string representation of the given handle.}]>:$string_handle
5894  );
5895}
5896
5897def TF_IteratorV2Op : TF_Op<"IteratorV2", []> {
5898  let summary = "";
5899
5900  let arguments = (ins
5901    StrAttr:$shared_name,
5902    StrAttr:$container,
5903    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
5904    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
5905  );
5906
5907  let results = (outs
5908    Res<TF_ResourceTensor, "", [TF_DatasetIteratorAlloc]>:$handle
5909  );
5910}
5911
5912def TF_KthOrderStatisticOp : TF_Op<"KthOrderStatistic", [NoSideEffect]> {
5913  let summary = "Computes the Kth order statistic of a data set. The current";
5914
5915  let description = [{
5916implementation uses a binary search requiring exactly 32 passes over
5917the input data. The running time is linear with respect to input
5918size. The median-of-medians algorithm is probably faster, but is
5919difficult to implement efficiently in XLA. The implementation imposes
5920a total ordering on floats. The ordering is consistent with the usual
5921partial order.  Positive NaNs are greater than positive
5922infinity. Negative NaNs are less than negative infinity. NaNs with
5923distinct payloads are treated as distinct. Subnormal numbers are
5924preserved (not flushed to zero). Positive infinity is greater than all
numbers. Negative infinity is less than all numbers. Positive zero is
greater than negative zero. There are fewer than k values greater than
the kth order statistic. There are at least k values greater than or
equal to the kth order statistic. The semantics are not the same as
5929top_k_unique.
5930  }];
5931
5932  let arguments = (ins
5933    TF_Float32Tensor:$input,
5934
5935    I64Attr:$k
5936  );
5937
5938  let results = (outs
5939    TF_Float32Tensor:$output
5940  );
5941}
5942
5943def TF_L2LossOp : TF_Op<"L2Loss", [NoSideEffect]> {
5944  let summary = "L2 Loss.";
5945
5946  let description = [{
5947Computes half the L2 norm of a tensor without the `sqrt`:
5948
5949    output = sum(t ** 2) / 2
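
For example (a sketch via the `tf.nn.l2_loss` wrapper):
`sum([1, 2] ** 2) / 2 = 2.5`.

```python
import tensorflow as tf

print(tf.nn.l2_loss(tf.constant([1.0, 2.0])))   # 2.5
```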
5950  }];
5951
5952  let arguments = (ins
5953    Arg<TF_FloatTensor, [{Typically 2-D, but may have any dimensions.}]>:$t
5954  );
5955
5956  let results = (outs
5957    Res<TF_FloatTensor, [{0-D.}]>:$output
5958  );
5959
5960  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5961}
5962
5963def TF_LRNOp : TF_Op<"LRN", [NoSideEffect]> {
5964  let summary = "Local Response Normalization.";
5965
5966  let description = [{
5967The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last
5968dimension), and each vector is normalized independently.  Within a given vector,
5969each component is divided by the weighted, squared sum of inputs within
5970`depth_radius`.  In detail,
5971
5972    sqr_sum[a, b, c, d] =
5973        sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
5974    output = input / (bias + alpha * sqr_sum) ** beta
5975
5976For details, see [Krizhevsky et al., ImageNet classification with deep
5977convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
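
A shape-level sketch using the `tf.nn.local_response_normalization` wrapper;
the parameter names mirror the attributes below:

```python
import tensorflow as tf

x = tf.random.normal([1, 4, 4, 8])   # NHWC
y = tf.nn.local_response_normalization(
    x, depth_radius=5, bias=1.0, alpha=1.0, beta=0.5)
```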
5978  }];
5979
5980  let arguments = (ins
5981    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D.}]>:$input,
5982
5983    DefaultValuedAttr<I64Attr, "5">:$depth_radius,
5984    DefaultValuedAttr<F32Attr, "1.0f">:$bias,
5985    DefaultValuedAttr<F32Attr, "1.0f">:$alpha,
5986    DefaultValuedAttr<F32Attr, "0.5f">:$beta
5987  );
5988
5989  let results = (outs
5990    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$output
5991  );
5992
5993  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
5994}
5995
5996def TF_LRNGradOp : TF_Op<"LRNGrad", [NoSideEffect]> {
5997  let summary = "Gradients for Local Response Normalization.";
5998
5999  let arguments = (ins
6000    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$input_grads,
6001    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$input_image,
6002    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$output_image,
6003
6004    DefaultValuedAttr<I64Attr, "5">:$depth_radius,
6005    DefaultValuedAttr<F32Attr, "1.0f">:$bias,
6006    DefaultValuedAttr<F32Attr, "1.0f">:$alpha,
6007    DefaultValuedAttr<F32Attr, "0.5f">:$beta
6008  );
6009
6010  let results = (outs
6011    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The gradients for LRN.}]>:$output
6012  );
6013
6014  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6015}
6016
6017def TF_LeakyReluOp : TF_Op<"LeakyRelu", [NoSideEffect, TF_ContractionFusableInterface, TF_SameOperandsAndResultTypeResolveRef]> {
6018  let summary = "Computes rectified linear: `max(features, features * alpha)`.";
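
  let description = [{
For example, via the Python-level wrapper `tf.nn.leaky_relu`:

```python
import tensorflow as tf

x = tf.constant([-2.0, 0.0, 3.0])
tf.nn.leaky_relu(x, alpha=0.2)  # max(x, 0.2 * x) ==> [-0.4, 0.0, 3.0]
```
  }];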
6019
6020  let arguments = (ins
6021    TF_FloatTensor:$features,
6022
6023    DefaultValuedAttr<F32Attr, "0.2f">:$alpha
6024  );
6025
6026  let results = (outs
6027    TF_FloatTensor:$activations
6028  );
6029
6030  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6031
6032  let hasFolder = 1;
6033
6034  let extraClassDeclaration = [{
6035    // TF_ContractionFusableInterface:
6036    Optional<ContractionFusion> GetContractionFusion();
6037  }];
6038}
6039
6040def TF_LeakyReluGradOp : TF_Op<"LeakyReluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
6041  let summary = [{
6042Computes rectified linear gradients for a LeakyRelu operation.
6043  }];
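
  let description = [{
A sketch of the gradient formula from the result description, written with
plain TensorFlow ops (illustrative):

```python
import tensorflow as tf

alpha = 0.2
gradients = tf.constant([1.0, 1.0, 1.0])
features = tf.constant([-2.0, 0.0, 3.0])
backprops = tf.where(features > 0, gradients, alpha * gradients)
# ==> [0.2, 0.2, 1.0]
```
  }];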
6044
6045  let arguments = (ins
6046    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding LeakyRelu operation.}]>:$gradients,
6047    Arg<TF_FloatTensor, [{The features passed as input to the corresponding LeakyRelu operation,
6048OR the outputs of that operation (both work equivalently).}]>:$features,
6049
6050    DefaultValuedAttr<F32Attr, "0.2f">:$alpha
6051  );
6052
6053  let results = (outs
6054    Res<TF_FloatTensor, [{`gradients * (features > 0) + alpha * gradients * (features <= 0)`.}]>:$backprops
6055  );
6056
6057  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6058}
6059
6060def TF_LeftShiftOp : TF_Op<"LeftShift", [NoSideEffect, ResultsBroadcastableShape]>,
6061                     WithBroadcastableBinOpBuilder {
6062  let summary = "Elementwise computes the bitwise left-shift of `x` and `y`.";
6063
6064  let description = [{
If `y` is negative, or greater than or equal to the width of `x` in bits, the
result is implementation defined.
6067
6068Example:
6069
6070```python
6071import tensorflow as tf
6072from tensorflow.python.ops import bitwise_ops
6073import numpy as np
6074dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
6075
6076for dtype in dtype_list:
6077  lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
6078  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
6079
6080  left_shift_result = bitwise_ops.left_shift(lhs, rhs)
6081
6082  print(left_shift_result)
6083
6084# This will print:
6085# tf.Tensor([ -32   -5 -128    0], shape=(4,), dtype=int8)
6086# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int16)
6087# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int32)
6088# tf.Tensor([   -32     -5   -384 -28672], shape=(4,), dtype=int64)
6089
6090lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
6091rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
6092bitwise_ops.left_shift(lhs, rhs)
6093# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
6094```
6095  }];
6096
6097  let arguments = (ins
6098    TF_IntTensor:$x,
6099    TF_IntTensor:$y
6100  );
6101
6102  let results = (outs
6103    TF_IntTensor:$z
6104  );
6105
6106  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6107}
6108
6109def TF_LessOp : TF_Op<"Less", [NoSideEffect, ResultsBroadcastableShape]>,
6110                WithBroadcastableCmpOpBuilder {
6111  let summary = "Returns the truth value of (x < y) element-wise.";
6112
6113  let description = [{
6114*NOTE*: `Less` supports broadcasting. More about broadcasting
6115[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
6116
6117Example:
6118
6119```python
6120x = tf.constant([5, 4, 6])
6121y = tf.constant([5])
6122tf.math.less(x, y) ==> [False, True, False]
6123
6124x = tf.constant([5, 4, 6])
6125y = tf.constant([5, 6, 7])
6126tf.math.less(x, y) ==> [False, True, True]
6127```
6128  }];
6129
6130  let arguments = (ins
6131    TF_IntOrFpTensor:$x,
6132    TF_IntOrFpTensor:$y
6133  );
6134
6135  let results = (outs
6136    TF_BoolTensor:$z
6137  );
6138
6139  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6140}
6141
6142def TF_LessEqualOp : TF_Op<"LessEqual", [NoSideEffect, ResultsBroadcastableShape]>,
6143                     WithBroadcastableCmpOpBuilder {
6144  let summary = "Returns the truth value of (x <= y) element-wise.";
6145
6146  let description = [{
6147*NOTE*: `LessEqual` supports broadcasting. More about broadcasting
6148[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
6149
6150Example:
6151
6152```python
6153x = tf.constant([5, 4, 6])
6154y = tf.constant([5])
6155tf.math.less_equal(x, y) ==> [True, True, False]
6156
6157x = tf.constant([5, 4, 6])
6158y = tf.constant([5, 6, 6])
6159tf.math.less_equal(x, y) ==> [True, True, True]
6160```
6161  }];
6162
6163  let arguments = (ins
6164    TF_IntOrFpTensor:$x,
6165    TF_IntOrFpTensor:$y
6166  );
6167
6168  let results = (outs
6169    TF_BoolTensor:$z
6170  );
6171
6172  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6173}
6174
6175def TF_LgammaOp : TF_Op<"Lgamma", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
6176  let summary = [{
6177Computes the log of the absolute value of `Gamma(x)` element-wise.
6178  }];
6179
6180  let description = [{
6181For positive numbers, this function computes log((input - 1)!) for every element in the tensor.
6182  `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539`
6183
6184Example:
6185
6186```python
6187x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6])
6188tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685]
6189```
6190  }];
6191
6192  let arguments = (ins
6193    TF_FloatTensor:$x
6194  );
6195
6196  let results = (outs
6197    TF_FloatTensor:$y
6198  );
6199
6200  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6201}
6202
6203def TF_LinSpaceOp : TF_Op<"LinSpace", [NoSideEffect]> {
6204  let summary = "Generates values in an interval.";
6205
6206  let description = [{
A sequence of `num` evenly-spaced values is generated beginning at `start`.
If `num > 1`, the values in the sequence increase by `(stop - start) / (num - 1)`,
so that the last one is exactly `stop`.
6210
6211For example:
6212
6213```
6214tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
6215```
6216  }];
6217
6218  let arguments = (ins
6219    Arg<TF_FloatTensor, [{0-D tensor. First entry in the range.}]>:$start,
6220    Arg<TF_FloatTensor, [{0-D tensor. Last entry in the range.}]>:$stop,
6221    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of values to generate.}]>:$num
6222  );
6223
6224  let results = (outs
6225    Res<TF_FloatTensor, [{1-D. The generated values.}]>:$output
6226  );
6227
6228  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6229  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<2>;
6230}
6231
6232def TF_ListDiffOp : TF_Op<"ListDiff", [NoSideEffect]> {
6233  let summary = [{
6234Computes the difference between two lists of numbers or strings.
6235  }];
6236
6237  let description = [{
6238Given a list `x` and a list `y`, this operation returns a list `out` that
6239represents all values that are in `x` but not in `y`. The returned list `out`
6240is sorted in the same order that the numbers appear in `x` (duplicates are
6241preserved). This operation also returns a list `idx` that represents the
6242position of each `out` element in `x`. In other words:
6243
6244`out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]`
6245
6246For example, given this input:
6247
6248```
6249x = [1, 2, 3, 4, 5, 6]
6250y = [1, 3, 5]
6251```
6252
6253This operation would return:
6254
6255```
6256out ==> [2, 4, 6]
6257idx ==> [1, 3, 5]
6258```
6259  }];
6260
6261  let arguments = (ins
6262    Arg<TF_Tensor, [{1-D. Values to keep.}]>:$x,
6263    Arg<TF_Tensor, [{1-D. Values to remove.}]>:$y
6264  );
6265
6266  let results = (outs
6267    Res<TF_Tensor, [{1-D. Values present in `x` but not in `y`.}]>:$out,
6268    Res<TF_I32OrI64Tensor, [{1-D. Positions of `x` values preserved in `out`.}]>:$idx
6269  );
6270
6271  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6272  TF_DerivedResultTypeAttr out_idx = TF_DerivedResultTypeAttr<1>;
6273}
6274
6275def TF_LoadTPUEmbeddingADAMParametersOp : TF_Op<"LoadTPUEmbeddingADAMParameters", [TF_TPUEmbeddingSideEffect]> {
6276  let summary = "Load ADAM embedding parameters.";
6277
6278  let description = [{
6279An op that loads optimization parameters into HBM for embedding. Must be
6280preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6281embedding table configuration. For example, this op is used to install
6282parameters that are loaded from a checkpoint before a training loop is
6283executed.
6284  }];
6285
6286  let arguments = (ins
6287    Arg<TF_Float32Tensor, [{Value of parameters used in the ADAM optimization algorithm.}]>:$parameters,
6288    Arg<TF_Float32Tensor, [{Value of momenta used in the ADAM optimization algorithm.}]>:$momenta,
6289    Arg<TF_Float32Tensor, [{Value of velocities used in the ADAM optimization algorithm.}]>:$velocities,
6290
6291    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6292    StrAttr:$table_name,
6293    I64Attr:$num_shards,
6294    I64Attr:$shard_id,
6295    StrAttr:$config
6296  );
6297
6298  let results = (outs);
6299}
6300
6301def TF_LoadTPUEmbeddingADAMParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingADAMParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6302  let summary = "Load ADAM embedding parameters with debug support.";
6303
6304  let description = [{
6305An op that loads optimization parameters into HBM for embedding. Must be
6306preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6307embedding table configuration. For example, this op is used to install
6308parameters that are loaded from a checkpoint before a training loop is
6309executed.
6310  }];
6311
6312  let arguments = (ins
6313    Arg<TF_Float32Tensor, [{Value of parameters used in the ADAM optimization algorithm.}]>:$parameters,
6314    Arg<TF_Float32Tensor, [{Value of momenta used in the ADAM optimization algorithm.}]>:$momenta,
6315    Arg<TF_Float32Tensor, [{Value of velocities used in the ADAM optimization algorithm.}]>:$velocities,
6316    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the ADAM optimization algorithm.}]>:$gradient_accumulators,
6317
6318    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6319    StrAttr:$table_name,
6320    I64Attr:$num_shards,
6321    I64Attr:$shard_id,
6322    StrAttr:$config
6323  );
6324
6325  let results = (outs);
6326}
6327
6328def TF_LoadTPUEmbeddingAdadeltaParametersOp : TF_Op<"LoadTPUEmbeddingAdadeltaParameters", [TF_TPUEmbeddingSideEffect]> {
6329  let summary = "Load Adadelta embedding parameters.";
6330
6331  let description = [{
6332An op that loads optimization parameters into HBM for embedding. Must be
6333preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6334embedding table configuration. For example, this op is used to install
6335parameters that are loaded from a checkpoint before a training loop is
6336executed.
6337  }];
6338
6339  let arguments = (ins
6340    Arg<TF_Float32Tensor, [{Value of parameters used in the Adadelta optimization algorithm.}]>:$parameters,
6341    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adadelta optimization algorithm.}]>:$accumulators,
6342    Arg<TF_Float32Tensor, [{Value of updates used in the Adadelta optimization algorithm.}]>:$updates,
6343
6344    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6345    StrAttr:$table_name,
6346    I64Attr:$num_shards,
6347    I64Attr:$shard_id,
6348    StrAttr:$config
6349  );
6350
6351  let results = (outs);
6352}
6353
6354def TF_LoadTPUEmbeddingAdadeltaParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingAdadeltaParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Load Adadelta embedding parameters with debug support.";
6356
6357  let description = [{
6358An op that loads optimization parameters into HBM for embedding. Must be
6359preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6360embedding table configuration. For example, this op is used to install
6361parameters that are loaded from a checkpoint before a training loop is
6362executed.
6363  }];
6364
6365  let arguments = (ins
6366    Arg<TF_Float32Tensor, [{Value of parameters used in the Adadelta optimization algorithm.}]>:$parameters,
6367    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adadelta optimization algorithm.}]>:$accumulators,
6368    Arg<TF_Float32Tensor, [{Value of updates used in the Adadelta optimization algorithm.}]>:$updates,
6369    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the Adadelta optimization algorithm.}]>:$gradient_accumulators,
6370
6371    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6372    StrAttr:$table_name,
6373    I64Attr:$num_shards,
6374    I64Attr:$shard_id,
6375    StrAttr:$config
6376  );
6377
6378  let results = (outs);
6379}
6380
6381def TF_LoadTPUEmbeddingAdagradParametersOp : TF_Op<"LoadTPUEmbeddingAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
6382  let summary = "Load Adagrad embedding parameters.";
6383
6384  let description = [{
6385An op that loads optimization parameters into HBM for embedding. Must be
6386preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6387embedding table configuration. For example, this op is used to install
6388parameters that are loaded from a checkpoint before a training loop is
6389executed.
6390  }];
6391
6392  let arguments = (ins
6393    Arg<TF_Float32Tensor, [{Value of parameters used in the Adagrad optimization algorithm.}]>:$parameters,
6394    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adagrad optimization algorithm.}]>:$accumulators,
6395
6396    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6397    StrAttr:$table_name,
6398    I64Attr:$num_shards,
6399    I64Attr:$shard_id,
6400    StrAttr:$config
6401  );
6402
6403  let results = (outs);
6404}
6405
6406def TF_LoadTPUEmbeddingAdagradParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6407  let summary = "Load Adagrad embedding parameters with debug support.";
6408
6409  let description = [{
6410An op that loads optimization parameters into HBM for embedding. Must be
6411preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6412embedding table configuration. For example, this op is used to install
6413parameters that are loaded from a checkpoint before a training loop is
6414executed.
6415  }];
6416
6417  let arguments = (ins
6418    Arg<TF_Float32Tensor, [{Value of parameters used in the Adagrad optimization algorithm.}]>:$parameters,
6419    Arg<TF_Float32Tensor, [{Value of accumulators used in the Adagrad optimization algorithm.}]>:$accumulators,
6420    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the Adagrad optimization algorithm.}]>:$gradient_accumulators,
6421
6422    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6423    StrAttr:$table_name,
6424    I64Attr:$num_shards,
6425    I64Attr:$shard_id,
6426    StrAttr:$config
6427  );
6428
6429  let results = (outs);
6430}
6431
6432def TF_LoadTPUEmbeddingCenteredRMSPropParametersOp : TF_Op<"LoadTPUEmbeddingCenteredRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
6433  let summary = "Load centered RMSProp embedding parameters.";
6434
6435  let description = [{
6436An op that loads optimization parameters into HBM for embedding. Must be
6437preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6438embedding table configuration. For example, this op is used to install
6439parameters that are loaded from a checkpoint before a training loop is
6440executed.
6441  }];
6442
6443  let arguments = (ins
6444    Arg<TF_Float32Tensor, [{Value of parameters used in the centered RMSProp optimization algorithm.}]>:$parameters,
6445    Arg<TF_Float32Tensor, [{Value of ms used in the centered RMSProp optimization algorithm.}]>:$ms,
6446    Arg<TF_Float32Tensor, [{Value of mom used in the centered RMSProp optimization algorithm.}]>:$mom,
6447    Arg<TF_Float32Tensor, [{Value of mg used in the centered RMSProp optimization algorithm.}]>:$mg,
6448
6449    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6450    StrAttr:$table_name,
6451    I64Attr:$num_shards,
6452    I64Attr:$shard_id,
6453    StrAttr:$config
6454  );
6455
6456  let results = (outs);
6457}
6458
6459def TF_LoadTPUEmbeddingFTRLParametersOp : TF_Op<"LoadTPUEmbeddingFTRLParameters", [TF_TPUEmbeddingSideEffect]> {
6460  let summary = "Load FTRL embedding parameters.";
6461
6462  let description = [{
6463An op that loads optimization parameters into HBM for embedding. Must be
6464preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6465embedding table configuration. For example, this op is used to install
6466parameters that are loaded from a checkpoint before a training loop is
6467executed.
6468  }];
6469
6470  let arguments = (ins
6471    Arg<TF_Float32Tensor, [{Value of parameters used in the FTRL optimization algorithm.}]>:$parameters,
6472    Arg<TF_Float32Tensor, [{Value of accumulators used in the FTRL optimization algorithm.}]>:$accumulators,
6473    Arg<TF_Float32Tensor, [{Value of linears used in the FTRL optimization algorithm.}]>:$linears,
6474
6475    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6476    StrAttr:$table_name,
6477    I64Attr:$num_shards,
6478    I64Attr:$shard_id,
6479    StrAttr:$config
6480  );
6481
6482  let results = (outs);
6483}
6484
6485def TF_LoadTPUEmbeddingFTRLParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingFTRLParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6486  let summary = "Load FTRL embedding parameters with debug support.";
6487
6488  let description = [{
6489An op that loads optimization parameters into HBM for embedding. Must be
6490preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6491embedding table configuration. For example, this op is used to install
6492parameters that are loaded from a checkpoint before a training loop is
6493executed.
6494  }];
6495
6496  let arguments = (ins
6497    Arg<TF_Float32Tensor, [{Value of parameters used in the FTRL optimization algorithm.}]>:$parameters,
6498    Arg<TF_Float32Tensor, [{Value of accumulators used in the FTRL optimization algorithm.}]>:$accumulators,
6499    Arg<TF_Float32Tensor, [{Value of linears used in the FTRL optimization algorithm.}]>:$linears,
6500    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the FTRL optimization algorithm.}]>:$gradient_accumulators,
6501
6502    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6503    StrAttr:$table_name,
6504    I64Attr:$num_shards,
6505    I64Attr:$shard_id,
6506    StrAttr:$config
6507  );
6508
6509  let results = (outs);
6510}
6511
6512def TF_LoadTPUEmbeddingMDLAdagradLightParametersOp : TF_Op<"LoadTPUEmbeddingMDLAdagradLightParameters", [TF_TPUEmbeddingSideEffect]> {
6513  let summary = "Load MDL Adagrad Light embedding parameters.";
6514
6515  let description = [{
6516An op that loads optimization parameters into HBM for embedding. Must be
6517preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6518embedding table configuration. For example, this op is used to install
6519parameters that are loaded from a checkpoint before a training loop is
6520executed.
6521  }];
6522
6523  let arguments = (ins
6524    Arg<TF_Float32Tensor, [{Value of parameters used in the MDL Adagrad Light optimization algorithm.}]>:$parameters,
6525    Arg<TF_Float32Tensor, [{Value of accumulators used in the MDL Adagrad Light optimization algorithm.}]>:$accumulators,
6526    Arg<TF_Float32Tensor, [{Value of weights used in the MDL Adagrad Light optimization algorithm.}]>:$weights,
6527    Arg<TF_Float32Tensor, [{Value of benefits used in the MDL Adagrad Light optimization algorithm.}]>:$benefits,
6528
6529    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6530    StrAttr:$table_name,
6531    I64Attr:$num_shards,
6532    I64Attr:$shard_id,
6533    StrAttr:$config
6534  );
6535
6536  let results = (outs);
6537}
6538
6539def TF_LoadTPUEmbeddingMomentumParametersOp : TF_Op<"LoadTPUEmbeddingMomentumParameters", [TF_TPUEmbeddingSideEffect]> {
6540  let summary = "Load Momentum embedding parameters.";
6541
6542  let description = [{
6543An op that loads optimization parameters into HBM for embedding. Must be
6544preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6545embedding table configuration. For example, this op is used to install
6546parameters that are loaded from a checkpoint before a training loop is
6547executed.
6548  }];
6549
6550  let arguments = (ins
6551    Arg<TF_Float32Tensor, [{Value of parameters used in the Momentum optimization algorithm.}]>:$parameters,
6552    Arg<TF_Float32Tensor, [{Value of momenta used in the Momentum optimization algorithm.}]>:$momenta,
6553
6554    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6555    StrAttr:$table_name,
6556    I64Attr:$num_shards,
6557    I64Attr:$shard_id,
6558    StrAttr:$config
6559  );
6560
6561  let results = (outs);
6562}
6563
6564def TF_LoadTPUEmbeddingMomentumParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingMomentumParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6565  let summary = "Load Momentum embedding parameters with debug support.";
6566
6567  let description = [{
6568An op that loads optimization parameters into HBM for embedding. Must be
6569preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6570embedding table configuration. For example, this op is used to install
6571parameters that are loaded from a checkpoint before a training loop is
6572executed.
6573  }];
6574
6575  let arguments = (ins
6576    Arg<TF_Float32Tensor, [{Value of parameters used in the Momentum optimization algorithm.}]>:$parameters,
6577    Arg<TF_Float32Tensor, [{Value of momenta used in the Momentum optimization algorithm.}]>:$momenta,
6578    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the Momentum optimization algorithm.}]>:$gradient_accumulators,
6579
6580    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6581    StrAttr:$table_name,
6582    I64Attr:$num_shards,
6583    I64Attr:$shard_id,
6584    StrAttr:$config
6585  );
6586
6587  let results = (outs);
6588}
6589
6590def TF_LoadTPUEmbeddingProximalAdagradParametersOp : TF_Op<"LoadTPUEmbeddingProximalAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
6591  let summary = "Load proximal Adagrad embedding parameters.";
6592
6593  let description = [{
6594An op that loads optimization parameters into HBM for embedding. Must be
6595preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6596embedding table configuration. For example, this op is used to install
6597parameters that are loaded from a checkpoint before a training loop is
6598executed.
6599  }];
6600
6601  let arguments = (ins
6602    Arg<TF_Float32Tensor, [{Value of parameters used in the proximal Adagrad optimization algorithm.}]>:$parameters,
6603    Arg<TF_Float32Tensor, [{Value of accumulators used in the proximal Adagrad optimization algorithm.}]>:$accumulators,
6604
6605    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6606    StrAttr:$table_name,
6607    I64Attr:$num_shards,
6608    I64Attr:$shard_id,
6609    StrAttr:$config
6610  );
6611
6612  let results = (outs);
6613}
6614
6615def TF_LoadTPUEmbeddingProximalAdagradParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingProximalAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6616  let summary = [{
6617Load proximal Adagrad embedding parameters with debug support.
6618  }];
6619
6620  let description = [{
6621An op that loads optimization parameters into HBM for embedding. Must be
6622preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6623embedding table configuration. For example, this op is used to install
6624parameters that are loaded from a checkpoint before a training loop is
6625executed.
6626  }];
6627
6628  let arguments = (ins
6629    Arg<TF_Float32Tensor, [{Value of parameters used in the proximal Adagrad optimization algorithm.}]>:$parameters,
6630    Arg<TF_Float32Tensor, [{Value of accumulators used in the proximal Adagrad optimization algorithm.}]>:$accumulators,
6631    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the proximal Adagrad optimization algorithm.}]>:$gradient_accumulators,
6632
6633    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6634    StrAttr:$table_name,
6635    I64Attr:$num_shards,
6636    I64Attr:$shard_id,
6637    StrAttr:$config
6638  );
6639
6640  let results = (outs);
6641}
6642
6643def TF_LoadTPUEmbeddingProximalYogiParametersOp : TF_Op<"LoadTPUEmbeddingProximalYogiParameters", [TF_TPUEmbeddingSideEffect]> {
6644  let summary = "";
6645
6646  let arguments = (ins
6647    TF_Float32Tensor:$parameters,
6648    TF_Float32Tensor:$v,
6649    TF_Float32Tensor:$m,
6650
6651    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6652    StrAttr:$table_name,
6653    I64Attr:$num_shards,
6654    I64Attr:$shard_id,
6655    StrAttr:$config
6656  );
6657
6658  let results = (outs);
6659}
6660
6661def TF_LoadTPUEmbeddingProximalYogiParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingProximalYogiParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6662  let summary = "";
6663
6664  let arguments = (ins
6665    TF_Float32Tensor:$parameters,
6666    TF_Float32Tensor:$v,
6667    TF_Float32Tensor:$m,
6668    TF_Float32Tensor:$gradient_accumulators,
6669
6670    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6671    StrAttr:$table_name,
6672    I64Attr:$num_shards,
6673    I64Attr:$shard_id,
6674    StrAttr:$config
6675  );
6676
6677  let results = (outs);
6678}
6679
6680def TF_LoadTPUEmbeddingRMSPropParametersOp : TF_Op<"LoadTPUEmbeddingRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
6681  let summary = "Load RMSProp embedding parameters.";
6682
6683  let description = [{
6684An op that loads optimization parameters into HBM for embedding. Must be
6685preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6686embedding table configuration. For example, this op is used to install
6687parameters that are loaded from a checkpoint before a training loop is
6688executed.
6689  }];
6690
6691  let arguments = (ins
6692    Arg<TF_Float32Tensor, [{Value of parameters used in the RMSProp optimization algorithm.}]>:$parameters,
6693    Arg<TF_Float32Tensor, [{Value of ms used in the RMSProp optimization algorithm.}]>:$ms,
6694    Arg<TF_Float32Tensor, [{Value of mom used in the RMSProp optimization algorithm.}]>:$mom,
6695
6696    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6697    StrAttr:$table_name,
6698    I64Attr:$num_shards,
6699    I64Attr:$shard_id,
6700    StrAttr:$config
6701  );
6702
6703  let results = (outs);
6704}
6705
6706def TF_LoadTPUEmbeddingRMSPropParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingRMSPropParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
6707  let summary = "Load RMSProp embedding parameters with debug support.";
6708
6709  let description = [{
6710An op that loads optimization parameters into HBM for embedding. Must be
6711preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6712embedding table configuration. For example, this op is used to install
6713parameters that are loaded from a checkpoint before a training loop is
6714executed.
6715  }];
6716
6717  let arguments = (ins
6718    Arg<TF_Float32Tensor, [{Value of parameters used in the RMSProp optimization algorithm.}]>:$parameters,
6719    Arg<TF_Float32Tensor, [{Value of ms used in the RMSProp optimization algorithm.}]>:$ms,
6720    Arg<TF_Float32Tensor, [{Value of mom used in the RMSProp optimization algorithm.}]>:$mom,
6721    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the RMSProp optimization algorithm.}]>:$gradient_accumulators,
6722
6723    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6724    StrAttr:$table_name,
6725    I64Attr:$num_shards,
6726    I64Attr:$shard_id,
6727    StrAttr:$config
6728  );
6729
6730  let results = (outs);
6731}
6732
6733def TF_LoadTPUEmbeddingStochasticGradientDescentParametersOp : TF_Op<"LoadTPUEmbeddingStochasticGradientDescentParameters", [TF_TPUEmbeddingSideEffect]> {
6734  let summary = "Load SGD embedding parameters.";
6735
6736  let description = [{
6737An op that loads optimization parameters into HBM for embedding. Must be
6738preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6739embedding table configuration. For example, this op is used to install
6740parameters that are loaded from a checkpoint before a training loop is
6741executed.
6742  }];
6743
6744  let arguments = (ins
6745    Arg<TF_Float32Tensor, [{Value of parameters used in the stochastic gradient descent optimization algorithm.}]>:$parameters,
6746
6747    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6748    StrAttr:$table_name,
6749    I64Attr:$num_shards,
6750    I64Attr:$shard_id,
6751    StrAttr:$config
6752  );
6753
6754  let results = (outs);
6755}
6756
6757def TF_LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugOp : TF_Op<"LoadTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
  let summary = "Load SGD embedding parameters with debug support.";
6759
6760  let description = [{
6761An op that loads optimization parameters into HBM for embedding. Must be
6762preceded by a ConfigureTPUEmbeddingHost op that sets up the correct
6763embedding table configuration. For example, this op is used to install
6764parameters that are loaded from a checkpoint before a training loop is
6765executed.
6766  }];
6767
6768  let arguments = (ins
6769    Arg<TF_Float32Tensor, [{Value of parameters used in the stochastic gradient descent optimization algorithm.}]>:$parameters,
    Arg<TF_Float32Tensor, [{Value of gradient_accumulators used in the stochastic gradient descent optimization algorithm.}]>:$gradient_accumulators,
6771
6772    DefaultValuedAttr<I64Attr, "-1">:$table_id,
6773    StrAttr:$table_name,
6774    I64Attr:$num_shards,
6775    I64Attr:$shard_id,
6776    StrAttr:$config
6777  );
6778
6779  let results = (outs);
6780}
6781
6782def TF_LogOp : TF_Op<"Log", [NoSideEffect, SameOperandsAndResultType]> {
6783  let summary = "Computes natural logarithm of x element-wise.";
6784
6785  let description = [{
6786I.e., \\(y = \log_e x\\).
6787
6788Example:
6789
6790```python
6791x = tf.constant([0, 0.5, 1, 5])
6792tf.math.log(x) ==> [-inf, -0.6931472,  0. ,  1.609438]
6793```
6794  }];
6795
6796  let arguments = (ins
6797    TF_FpOrComplexTensor:$x
6798  );
6799
6800  let results = (outs
6801    TF_FpOrComplexTensor:$y
6802  );
6803
6804  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6805
6806  let hasCanonicalizer = 1;
6807}
6808
6809def TF_Log1pOp : TF_Op<"Log1p", [NoSideEffect, SameOperandsAndResultType, TF_CwiseUnary]> {
6810  let summary = "Computes natural logarithm of (1 + x) element-wise.";
6811
6812  let description = [{
6813I.e., \\(y = \log_e (1 + x)\\).
6814
6815Example:
6816
6817```python
6818x = tf.constant([0, 0.5, 1, 5])
6819tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595]
6820```
6821  }];
6822
6823  let arguments = (ins
6824    TF_FpOrComplexTensor:$x
6825  );
6826
6827  let results = (outs
6828    TF_FpOrComplexTensor:$y
6829  );
6830
6831  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6832}
6833
6834def TF_LogSoftmaxOp : TF_Op<"LogSoftmax", [NoSideEffect, SameOperandsAndResultType]> {
6835  let summary = "Computes log softmax activations.";
6836
6837  let description = [{
6838For each batch `i` and class `j` we have
6839
6840    logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i])))
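
For example, via the Python-level wrapper `tf.nn.log_softmax`:

```python
import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.0]])
tf.nn.log_softmax(logits)
# ==> [[-0.40760595, -1.407606, -2.407606]]
```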
6841  }];
6842
6843  let arguments = (ins
6844    Arg<TF_FloatTensor, [{2-D with shape `[batch_size, num_classes]`.}]>:$logits
6845  );
6846
6847  let results = (outs
6848    Res<TF_FloatTensor, [{Same shape as `logits`.}]>:$logsoftmax
6849  );
6850
6851  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
6852}
6853
6854def TF_LogicalAndOp : TF_Op<"LogicalAnd", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
6855                      WithBroadcastableBinOpBuilder {
6856  let summary = "Returns the truth value of x AND y element-wise.";
6857
6858  let description = [{
6859*NOTE*: `LogicalAnd` supports broadcasting. More about broadcasting
6860[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
6861  }];
6862
6863  let arguments = (ins
6864    TF_BoolTensor:$x,
6865    TF_BoolTensor:$y
6866  );
6867
6868  let results = (outs
6869    TF_BoolTensor:$z
6870  );
6871}
6872
6873def TF_LogicalNotOp : TF_Op<"LogicalNot", [Involution, NoSideEffect, SameOperandsAndResultType]> {
6874  let summary = "Returns the truth value of `NOT x` element-wise.";
6875
6876  let arguments = (ins
6877    Arg<TF_BoolTensor, [{A `Tensor` of type `bool`.}]>:$x
6878  );
6879
6880  let results = (outs
6881    Res<TF_BoolTensor, [{A `Tensor` of type `bool` with the same shape as `x`. The logical negation of `x`.}]>:$y
6882  );
6883
6884  let hasCanonicalizer = 1;
6885}
6886
6887def TF_LogicalOrOp : TF_Op<"LogicalOr", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
6888                     WithBroadcastableBinOpBuilder {
6889  let summary = "Returns the truth value of x OR y element-wise.";
6890
6891  let description = [{
6892*NOTE*: `LogicalOr` supports broadcasting. More about broadcasting
6893[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
6894  }];
6895
6896  let arguments = (ins
6897    TF_BoolTensor:$x,
6898    TF_BoolTensor:$y
6899  );
6900
6901  let results = (outs
6902    TF_BoolTensor:$z
6903  );
6904}
6905
6906def TF_LookupTableExportV2Op : TF_Op<"LookupTableExportV2", []> {
6907  let summary = "Outputs all keys and values in the table.";
6908
6909  let arguments = (ins
6910    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle
6911  );
6912
6913  let results = (outs
6914    Res<TF_Tensor, [{Vector of all keys present in the table.}]>:$keys,
6915    Res<TF_Tensor, [{Tensor of all values in the table. Indexed in parallel with `keys`.}]>:$values
6916  );
6917
6918  TF_DerivedResultTypeAttr Tkeys = TF_DerivedResultTypeAttr<0>;
6919  TF_DerivedResultTypeAttr Tvalues = TF_DerivedResultTypeAttr<1>;
6920}
6921
6922def TF_LookupTableFindV2Op : TF_Op<"LookupTableFindV2", []> {
6923  let summary = "Looks up keys in a table, outputs the corresponding values.";
6924
6925  let description = [{
The tensor `keys` must be of the same type as the keys of the table.
6927The output `values` is of the type of the table values.
6928
6929The scalar `default_value` is the value output for keys not present in the
6930table. It must also be of the same type as the table values.
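
For example, a sketch using the Python-level lookup API, whose `lookup` call
is backed by this op on the table's resource handle:

```python
import tensorflow as tf

keys = tf.constant(["a", "b"])
values = tf.constant([1, 2], dtype=tf.int64)
table = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(keys, values), default_value=-1)
table.lookup(tf.constant(["a", "x"]))  # ==> [1, -1]
```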
6931  }];
6932
6933  let arguments = (ins
6934    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableRead]>:$table_handle,
6935    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
6936    TF_Tensor:$default_value
6937  );
6938
6939  let results = (outs
    Res<TF_Tensor, [{Same shape as `keys`.  Values found in the table, or `default_value`
for missing keys.}]>:$values
6942  );
6943
6944  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
6945  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<2>;
6946}
6947
6948def TF_LookupTableImportV2Op : TF_Op<"LookupTableImportV2", []> {
6949  let summary = [{
6950Replaces the contents of the table with the specified keys and values.
6951  }];
6952
6953  let description = [{
6954The tensor `keys` must be of the same type as the keys of the table.
6955The tensor `values` must be of the type of the table values.
6956  }];
6957
6958  let arguments = (ins
6959    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
6960    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
6961    Arg<TF_Tensor, [{Values to associate with keys.}]>:$values
6962  );
6963
6964  let results = (outs);
6965
6966  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
6967  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<2>;
6968}
6969
6970def TF_LookupTableInsertV2Op : TF_Op<"LookupTableInsertV2", []> {
  let summary = "Updates the table to associate keys with values.";
6972
6973  let description = [{
6974The tensor `keys` must be of the same type as the keys of the table.
6975The tensor `values` must be of the type of the table values.
6976  }];
6977
6978  let arguments = (ins
6979    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
6980    Arg<TF_Tensor, [{Any shape.  Keys to look up.}]>:$keys,
6981    Arg<TF_Tensor, [{Values to associate with keys.}]>:$values
6982  );
6983
6984  let results = (outs);
6985
6986  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
6987  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<2>;
6988}
6989
6990def TF_LookupTableRemoveV2Op : TF_Op<"LookupTableRemoveV2", []> {
  let summary = "Removes keys and their associated values from a table.";
6992
6993  let description = [{
The tensor `keys` must be of the same type as the keys of the table. Keys not
6995already in the table are silently ignored.
6996  }];
6997
6998  let arguments = (ins
6999    Arg<TF_ResourceTensor, [{Handle to the table.}], [TF_LookupTableWrite]>:$table_handle,
7000    Arg<TF_Tensor, [{Any shape.  Keys of the elements to remove.}]>:$keys
7001  );
7002
7003  let results = (outs);
7004
7005  TF_DerivedOperandTypeAttr Tin = TF_DerivedOperandTypeAttr<1>;
7006}
7007
7008def TF_LookupTableSizeV2Op : TF_Op<"LookupTableSizeV2", []> {
7009  let summary = "Computes the number of elements in the given table.";
7010
7011  let arguments = (ins
7012    Arg<TF_ResourceTensor, [{Handle to the table.}]>:$table_handle
7013  );
7014
7015  let results = (outs
7016    Res<TF_Int64Tensor, [{Scalar that contains number of elements in the table.}]>:$size
7017  );
7018}
7019
7020def TF_LowerBoundOp : TF_Op<"LowerBound", [NoSideEffect]> {
7021  let summary = [{
7022Applies lower_bound(sorted_search_values, values) along each row.
7023  }];
7024
7025  let description = [{
7026Each set of rows with the same index in (sorted_inputs, values) is treated
7027independently.  The resulting row is the equivalent of calling
7028`np.searchsorted(sorted_inputs, values, side='left')`.
7029
7030The result is not a global index to the entire
7031`Tensor`, but rather just the index in the last dimension.
7032
7033A 2-D example:
7034  sorted_sequence = [[0, 3, 9, 9, 10],
7035                     [1, 2, 3, 4, 5]]
7036  values = [[2, 4, 9],
7037            [0, 2, 6]]
7038
7039  result = LowerBound(sorted_sequence, values)
7040
7041  result == [[1, 2, 2],
7042             [0, 1, 5]]
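
A NumPy sketch of this row-wise behavior (illustrative):

```python
import numpy as np

sorted_inputs = np.array([[0, 3, 9, 9, 10],
                          [1, 2, 3, 4, 5]])
values = np.array([[2, 4, 9],
                   [0, 2, 6]])
# Apply searchsorted independently to each row, as described above.
result = np.stack([np.searchsorted(s, v, side='left')
                   for s, v in zip(sorted_inputs, values)])
# ==> [[1, 2, 2], [0, 1, 5]]
```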
7043  }];
7044
7045  let arguments = (ins
7046    Arg<TF_Tensor, [{2-D Tensor where each row is ordered.}]>:$sorted_inputs,
    Arg<TF_Tensor, [{2-D Tensor with the same number of rows as `sorted_inputs`. Contains
the values that will be searched for in `sorted_inputs`.}]>:$values
7049  );
7050
7051  let results = (outs
7052    Res<TF_I32OrI64Tensor, [{A `Tensor` with the same shape as `values`.  It contains the first scalar index
7053into the last dimension where values can be inserted without changing the
7054ordered property.}]>:$output
7055  );
7056
7057  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7058  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
7059}
7060
7061def TF_MakeIteratorOp : TF_Op<"MakeIterator", []> {
7062  let summary = [{
7063Makes a new iterator from the given `dataset` and stores it in `iterator`.
7064  }];
7065
7066  let description = [{
7067This operation may be executed multiple times. Each execution will reset the
7068iterator in `iterator` to the first element of `dataset`.
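
For example, a TF1-style sketch (the `tf.compat.v1` iterator APIs shown here
are assumed to lower to IteratorV2/MakeIterator ops on an iterator resource):

```python
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

dataset = tf.data.Dataset.range(3)
iterator = tf.data.Iterator.from_structure(
    tf.data.get_output_types(dataset), tf.data.get_output_shapes(dataset))
init_op = iterator.make_initializer(dataset)  # emits a MakeIterator op
```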
7069  }];
7070
7071  let arguments = (ins
7072    TF_VariantTensor:$dataset,
7073    Arg<TF_ResourceTensor, "", [TF_DatasetIteratorWrite]>:$iterator
7074  );
7075
7076  let results = (outs);
7077}
7078
7079def TF_MakeUniqueOp : TF_Op<"MakeUnique", [NoSideEffect]> {
  let summary = [{
Make all elements in the non-batch dimension unique, but \"close\" to their
initial value.
  }];

  let description = [{
Never returns a subnormal number. Never returns
7086zero. The sign of each input element is always identical to the sign
7087of the corresponding output element. Behavior for infinite elements is
7088undefined. Behavior for subnormal elements is undefined.
7089  }];
7090
7091  let arguments = (ins
7092    TF_Float32Tensor:$input
7093  );
7094
7095  let results = (outs
7096    TF_Float32Tensor:$output
7097  );
7098}
7099
7100def TF_MatMulOp : TF_Op<"MatMul", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
7101  let summary = [{
7102Multiply the matrix "a" by the matrix "b".
7103  }];
7104
7105  let description = [{
7106The inputs must be two-dimensional matrices and the inner dimension of
7107"a" (after being transposed if transpose_a is true) must match the
outer dimension of "b" (after being transposed if transpose_b is
7109true).
7110
7111*Note*: The default kernel implementation for MatMul on GPUs uses
7112cublas.
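
For example, via the Python-level wrapper `tf.linalg.matmul`:

```python
import tensorflow as tf

a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[5.0, 6.0], [7.0, 8.0]])
tf.linalg.matmul(a, b)  # ==> [[19.0, 22.0], [43.0, 50.0]]
```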
7113  }];
7114
7115  let arguments = (ins
7116    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$a,
7117    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$b,
7118
7119    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
7120    DefaultValuedAttr<BoolAttr, "false">:$transpose_b
7121  );
7122
7123  let results = (outs
7124    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$product
7125  );
7126
7127  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7128}
7129
7130def TF_MatrixBandPartOp : TF_Op<"MatrixBandPart", [NoSideEffect, TF_AllTypesMatch<["input", "band"]>]> {
7131  let summary = [{
7132Copy a tensor setting everything outside a central band in each innermost matrix to zero.
7133  }];
7134
7135  let description = [{
7136The `band` part is computed as follows:
7137Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a
7138tensor with the same shape where
7139
7140`band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
7141
7142The indicator function
7143
`in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower) &&
                 (num_upper < 0 || (n-m) <= num_upper)`.
7146
7147For example:
7148
7149```
7150# if 'input' is [[ 0,  1,  2, 3]
7151#                [-1,  0,  1, 2]
7152#                [-2, -1,  0, 1]
7153#                [-3, -2, -1, 0]],
7154
7155tf.matrix_band_part(input, 1, -1) ==> [[ 0,  1,  2, 3]
7156                                       [-1,  0,  1, 2]
7157                                       [ 0, -1,  0, 1]
7158                                       [ 0,  0, -1, 0]],
7159
7160tf.matrix_band_part(input, 2, 1) ==> [[ 0,  1,  0, 0]
7161                                      [-1,  0,  1, 0]
7162                                      [-2, -1,  0, 1]
7163                                      [ 0, -2, -1, 0]]
7164```
7165
7166Useful special cases:
7167
7168```
7169 tf.matrix_band_part(input, 0, -1) ==> Upper triangular part.
7170 tf.matrix_band_part(input, -1, 0) ==> Lower triangular part.
7171 tf.matrix_band_part(input, 0, 0) ==> Diagonal.
7172```
7173  }];
7174
7175  let arguments = (ins
7176    Arg<TF_Tensor, [{Rank `k` tensor.}]>:$input,
7177    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of subdiagonals to keep. If negative, keep entire
7178lower triangle.}]>:$num_lower,
7179    Arg<TF_I32OrI64Tensor, [{0-D tensor. Number of superdiagonals to keep. If negative, keep
7180entire upper triangle.}]>:$num_upper
7181  );
7182
7183  let results = (outs
7184    Res<TF_Tensor, [{Rank `k` tensor of the same shape as input. The extracted banded tensor.}]>:$band
7185  );
7186
7187  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7188  TF_DerivedOperandTypeAttr Tindex = TF_DerivedOperandTypeAttr<1>;
7189
7190  let verifier = [{
7191    return Verify(*this);
7192  }];
7193}
7194
7195def TF_MatrixDiagOp : TF_Op<"MatrixDiag", [NoSideEffect]> {
7196  let summary = [{
Returns a batched diagonal tensor with given batched diagonal values.
7198  }];
7199
7200  let description = [{
7201Given a `diagonal`, this operation returns a tensor with the `diagonal` and
7202everything else padded with zeros. The diagonal is computed as follows:
7203
7204Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a
tensor of rank `k+1` with dimensions `[I, J, K, ..., N, N]` where:
7206
7207`output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`.
7208
7209For example:
7210
7211```
7212# 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]]
7213
7214and diagonal.shape = (2, 4)
7215
7216tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0]
7217                                     [0, 2, 0, 0]
7218                                     [0, 0, 3, 0]
7219                                     [0, 0, 0, 4]],
7220                                    [[5, 0, 0, 0]
7221                                     [0, 6, 0, 0]
7222                                     [0, 0, 7, 0]
7223                                     [0, 0, 0, 8]]]
7224
7225which has shape (2, 4, 4)
7226```
7227  }];
7228
7229  let arguments = (ins
7230    Arg<TF_Tensor, [{Rank `k`, where `k >= 1`.}]>:$diagonal
7231  );
7232
7233  let results = (outs
7234    Res<TF_Tensor, [{Rank `k+1`, with `output.shape = diagonal.shape + [diagonal.shape[-1]]`.}]>:$output
7235  );
7236
7237  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7238}
7239
7240def TF_MatrixDiagPartV3Op : TF_Op<"MatrixDiagPartV3", [NoSideEffect]> {
7241  let summary = "Returns the batched diagonal part of a batched tensor.";
7242
7243  let description = [{
7244Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched
7245`input`.
7246
7247Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`.
7248Let `max_diag_len` be the maximum length among all diagonals to be extracted,
7249`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`
7250Let `num_diags` be the number of diagonals to extract,
7251`num_diags = k[1] - k[0] + 1`.
7252
7253If `num_diags == 1`, the output tensor is of rank `r - 1` with shape
7254`[I, J, ..., L, max_diag_len]` and values:
7255
7256```
7257diagonal[i, j, ..., l, n]
7258  = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
7259    padding_value                 ; otherwise.
7260```
7261where `y = max(-k[1], 0)`, `x = max(k[1], 0)`.
7262
7263Otherwise, the output tensor has rank `r` with dimensions
7264`[I, J, ..., L, num_diags, max_diag_len]` with values:
7265
7266```
7267diagonal[i, j, ..., l, m, n]
7268  = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N,
7269    padding_value                 ; otherwise.
7270```
7271where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`.
7272
7273`offset` is zero except when the alignment of the diagonal is to the right.
7274```
7275offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
7276                                           and `d >= 0`) or
7277                                         (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
7278                                           and `d <= 0`)
7279         0                          ; otherwise
7280```
7281where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
7282
7283The input must be at least a matrix.
7284
7285For example:
7286
7287```
7288input = np.array([[[1, 2, 3, 4],  # Input shape: (2, 3, 4)
7289                   [5, 6, 7, 8],
7290                   [9, 8, 7, 6]],
7291                  [[5, 4, 3, 2],
7292                   [1, 2, 3, 4],
7293                   [5, 6, 7, 8]]])
7294
7295# A main diagonal from each batch.
7296tf.matrix_diag_part(input) ==> [[1, 6, 7],  # Output shape: (2, 3)
7297                                [5, 2, 7]]
7298
7299# A superdiagonal from each batch.
7300tf.matrix_diag_part(input, k = 1)
7301  ==> [[2, 7, 6],  # Output shape: (2, 3)
7302       [4, 3, 8]]
7303
7304# A band from each batch.
7305tf.matrix_diag_part(input, k = (-1, 2))
7306  ==> [[[0, 3, 8],  # Output shape: (2, 4, 3)
7307        [2, 7, 6],
7308        [1, 6, 7],
7309        [5, 8, 0]],
7310       [[0, 3, 4],
7311        [4, 3, 8],
7312        [5, 2, 7],
7313        [1, 6, 0]]]
7314
7315# LEFT_RIGHT alignment.
7316tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT")
7317  ==> [[[3, 8, 0],  # Output shape: (2, 4, 3)
7318        [2, 7, 6],
7319        [1, 6, 7],
7320        [0, 5, 8]],
7321       [[3, 4, 0],
7322        [4, 3, 8],
7323        [5, 2, 7],
7324        [0, 1, 6]]]
7325
7326# max_diag_len can be shorter than the main diagonal.
7327tf.matrix_diag_part(input, k = (-2, -1))
7328  ==> [[[5, 8],
7329        [9, 0]],
7330       [[1, 6],
7331        [5, 0]]]
7332
7333# padding_value = 9
7334tf.matrix_diag_part(input, k = (1, 3), padding_value = 9)
7335  ==> [[[9, 9, 4],  # Output shape: (2, 3, 3)
7336        [9, 3, 8],
7337        [2, 7, 6]],
7338       [[9, 9, 2],
7339        [9, 3, 4],
7340        [4, 3, 8]]]
7341
7342```
7343  }];
7344
7345  let arguments = (ins
7346    Arg<TF_Tensor, [{Rank `r` tensor where `r >= 2`.}]>:$input,
7347    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7348diagonal, and negative value means subdiagonals. `k` can be a single integer
7349(for a single diagonal) or a pair of integers specifying the low and high ends
7350of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
7351    Arg<TF_Tensor, [{The value to fill the area outside the specified diagonal band with.
7352Default is 0.}]>:$padding_value,
7353
7354    DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
7355  );
7356
7357  let results = (outs
7358    Res<TF_Tensor, [{The extracted diagonal(s).}]>:$diagonal
7359  );
7360
7361  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7362}
7363
7364def TF_MatrixDiagV2Op : TF_Op<"MatrixDiagV2", [NoSideEffect]> {
7365  let summary = [{
7366Returns a batched diagonal tensor with given batched diagonal values.
7367  }];
7368
7369  let description = [{
7370Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
7371diagonals of a matrix, with everything else padded with `padding`. `num_rows`
7372and `num_cols` specify the dimension of the innermost matrix of the output. If
7373both are not specified, the op assumes the innermost matrix is square and infers
7374its size from `k` and the innermost dimension of `diagonal`. If only one of them
7375is specified, the op assumes the unspecified value is the smallest possible
7376based on other criteria.
7377
7378Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
7379rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
7380diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
7381`r` with shape `[I, J, ..., L, num_rows, num_cols]`.
7382
7383The second innermost dimension of `diagonal` has double meaning.
7384When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
`[I, J, ..., M]`, and the output tensor is:
7386
7387```
7388output[i, j, ..., l, m, n]
7389  = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
7390    padding_value                             ; otherwise
7391```
7392
7393Otherwise, `M` is treated as the number of diagonals for the matrix in the
7394same batch (`M = k[1]-k[0]+1`), and the output tensor is:
7395
7396```
7397output[i, j, ..., l, m, n]
7398  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
7399    padding_value                                     ; otherwise
7400```
7401where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
7402
7403For example:
7404
7405```
7406# The main diagonal.
7407diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
7408                     [5, 6, 7, 8]])
7409tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
7410                               [0, 2, 0, 0],
7411                               [0, 0, 3, 0],
7412                               [0, 0, 0, 4]],
7413                              [[5, 0, 0, 0],
7414                               [0, 6, 0, 0],
7415                               [0, 0, 7, 0],
7416                               [0, 0, 0, 8]]]
7417
7418# A superdiagonal (per batch).
7419diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
7420                     [4, 5, 6]])
7421tf.matrix_diag(diagonal, k = 1)
7422  ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
7423        [0, 0, 2, 0],
7424        [0, 0, 0, 3],
7425        [0, 0, 0, 0]],
7426       [[0, 4, 0, 0],
7427        [0, 0, 5, 0],
7428        [0, 0, 0, 6],
7429        [0, 0, 0, 0]]]
7430
7431# A band of diagonals.
7432diagonals = np.array([[[1, 2, 3],  # Input shape: (2, 2, 3)
7433                       [4, 5, 0]],
7434                      [[6, 7, 9],
7435                       [9, 1, 0]]])
7436tf.matrix_diag(diagonals, k = (-1, 0))
7437  ==> [[[1, 0, 0],  # Output shape: (2, 3, 3)
7438        [4, 2, 0],
7439        [0, 5, 3]],
7440       [[6, 0, 0],
7441        [9, 7, 0],
7442        [0, 1, 9]]]
7443
7444# Rectangular matrix.
7445diagonal = np.array([1, 2])  # Input shape: (2)
7446tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
7447  ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
7448       [1, 0, 0, 0],
7449       [0, 2, 0, 0]]
7450
7451# Rectangular matrix with inferred num_cols and padding_value = 9.
7452tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
7453  ==> [[9, 9],  # Output shape: (3, 2)
7454       [1, 9],
7455       [9, 2]]
7456```
7457  }];
7458
7459  let arguments = (ins
7460    Arg<TF_Tensor, [{Rank `r`, where `r >= 1`}]>:$diagonal,
7461    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7462diagonal, and negative value means subdiagonals. `k` can be a single integer
7463(for a single diagonal) or a pair of integers specifying the low and high ends
7464of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
7465    Arg<TF_Int32Tensor, [{The number of rows of the output matrix. If it is not provided, the op assumes
7466the output matrix is a square matrix and infers the matrix size from k and the
7467innermost dimension of `diagonal`.}]>:$num_rows,
7468    Arg<TF_Int32Tensor, [{The number of columns of the output matrix. If it is not provided, the op
7469assumes the output matrix is a square matrix and infers the matrix size from
7470k and the innermost dimension of `diagonal`.}]>:$num_cols,
7471    Arg<TF_Tensor, [{The number to fill the area outside the specified diagonal band with.
7472Default is 0.}]>:$padding_value
7473  );
7474
7475  let results = (outs
7476    Res<TF_Tensor, [{Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.}]>:$output
7477  );
7478
7479  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7480}
7481
7482def TF_MatrixDiagV3Op : TF_Op<"MatrixDiagV3", [NoSideEffect]> {
7483  let summary = [{
7484Returns a batched diagonal tensor with given batched diagonal values.
7485  }];
7486
7487  let description = [{
Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th
diagonals of a matrix, with everything else padded with `padding_value`. `num_rows`
7490and `num_cols` specify the dimension of the innermost matrix of the output. If
7491both are not specified, the op assumes the innermost matrix is square and infers
7492its size from `k` and the innermost dimension of `diagonal`. If only one of them
7493is specified, the op assumes the unspecified value is the smallest possible
7494based on other criteria.
7495
7496Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has
7497rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one
7498diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank
7499`r` with shape `[I, J, ..., L, num_rows, num_cols]`.
7500
The second innermost dimension of `diagonal` has a double meaning.
7502When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size
7503[I, J, ..., M], and the output tensor is:
7504
7505```
7506output[i, j, ..., l, m, n]
7507  = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper
7508    padding_value                             ; otherwise
7509```
7510
7511Otherwise, `M` is treated as the number of diagonals for the matrix in the
7512same batch (`M = k[1]-k[0]+1`), and the output tensor is:
7513
7514```
7515output[i, j, ..., l, m, n]
7516  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
7517    padding_value                                     ; otherwise
7518```
where `d = n - m`, `diag_index = k[1] - d`, and
7520`index_in_diag = n - max(d, 0) + offset`.
7521
7522`offset` is zero except when the alignment of the diagonal is to the right.
7523```
7524offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
7525                                           and `d >= 0`) or
7526                                         (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
7527                                           and `d <= 0`)
7528         0                          ; otherwise
7529```
7530where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
7531
7532For example:
7533
7534```
7535# The main diagonal.
7536diagonal = np.array([[1, 2, 3, 4],            # Input shape: (2, 4)
7537                     [5, 6, 7, 8]])
7538tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0],  # Output shape: (2, 4, 4)
7539                               [0, 2, 0, 0],
7540                               [0, 0, 3, 0],
7541                               [0, 0, 0, 4]],
7542                              [[5, 0, 0, 0],
7543                               [0, 6, 0, 0],
7544                               [0, 0, 7, 0],
7545                               [0, 0, 0, 8]]]
7546
7547# A superdiagonal (per batch).
7548diagonal = np.array([[1, 2, 3],  # Input shape: (2, 3)
7549                     [4, 5, 6]])
7550tf.matrix_diag(diagonal, k = 1)
7551  ==> [[[0, 1, 0, 0],  # Output shape: (2, 4, 4)
7552        [0, 0, 2, 0],
7553        [0, 0, 0, 3],
7554        [0, 0, 0, 0]],
7555       [[0, 4, 0, 0],
7556        [0, 0, 5, 0],
7557        [0, 0, 0, 6],
7558        [0, 0, 0, 0]]]
7559
7560# A tridiagonal band (per batch).
7561diagonals = np.array([[[0, 8, 9],  # Input shape: (2, 2, 3)
7562                       [1, 2, 3],
7563                       [4, 5, 0]],
7564                      [[0, 2, 3],
7565                       [6, 7, 9],
7566                       [9, 1, 0]]])
7567tf.matrix_diag(diagonals, k = (-1, 1))
7568  ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
7569        [4, 2, 9],
7570        [0, 5, 3]],
7571       [[6, 2, 0],
7572        [9, 7, 3],
7573        [0, 1, 9]]]
7574
7575# LEFT_RIGHT alignment.
7576diagonals = np.array([[[8, 9, 0],  # Input shape: (2, 2, 3)
7577                       [1, 2, 3],
7578                       [0, 4, 5]],
7579                      [[2, 3, 0],
7580                       [6, 7, 9],
7581                       [0, 9, 1]]])
7582tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT")
7583  ==> [[[1, 8, 0],  # Output shape: (2, 3, 3)
7584        [4, 2, 9],
7585        [0, 5, 3]],
7586       [[6, 2, 0],
7587        [9, 7, 3],
7588        [0, 1, 9]]]
7589
7590# Rectangular matrix.
7591diagonal = np.array([1, 2])  # Input shape: (2)
7592tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4)
7593  ==> [[0, 0, 0, 0],  # Output shape: (3, 4)
7594       [1, 0, 0, 0],
7595       [0, 2, 0, 0]]
7596
7597# Rectangular matrix with inferred num_cols and padding_value = 9.
7598tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9)
7599  ==> [[9, 9],  # Output shape: (3, 2)
7600       [1, 9],
7601       [9, 2]]
7602
7603```
7604  }];
7605
7606  let arguments = (ins
7607    Arg<TF_Tensor, [{Rank `r`, where `r >= 1`}]>:$diagonal,
7608    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7609diagonal, and negative value means subdiagonals. `k` can be a single integer
7610(for a single diagonal) or a pair of integers specifying the low and high ends
7611of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
7612    Arg<TF_Int32Tensor, [{The number of rows of the output matrix. If it is not provided, the op assumes
7613the output matrix is a square matrix and infers the matrix size from k and the
7614innermost dimension of `diagonal`.}]>:$num_rows,
7615    Arg<TF_Int32Tensor, [{The number of columns of the output matrix. If it is not provided, the op
7616assumes the output matrix is a square matrix and infers the matrix size from
7617k and the innermost dimension of `diagonal`.}]>:$num_cols,
7618    Arg<TF_Tensor, [{The number to fill the area outside the specified diagonal band with.
7619Default is 0.}]>:$padding_value,
7620
7621    DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
7622  );
7623
7624  let results = (outs
7625    Res<TF_Tensor, [{Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise.}]>:$output
7626  );
7627
7628  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7629}
7630
7631def TF_MatrixInverseOp : TF_Op<"MatrixInverse", [NoSideEffect]> {
7632  let summary = [{
7633Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).
7634  }];
7635
7636  let description = [{
7637The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
7638form square matrices. The output is a tensor of the same shape as the input
7639containing the inverse for all input submatrices `[..., :, :]`.
7640
7641The op uses LU decomposition with partial pivoting to compute the inverses.
7642
7643If a matrix is not invertible there is no guarantee what the op does. It
7644may detect the condition and raise an exception or it may simply return a
7645garbage result.
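
For example, a minimal sketch via the Python wrapper `tf.linalg.inv` (which is
backed by this op); the matrix below is hypothetical and the printed values are
rounded:

```
x = tf.constant([[4., 7.],
                 [2., 6.]])
tf.linalg.inv(x)
# ==> [[ 0.6, -0.7],
#      [-0.2,  0.4]]
```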
7646  }];
7647
7648  let arguments = (ins
7649    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$input,
7650
7651    DefaultValuedAttr<BoolAttr, "false">:$adjoint
7652  );
7653
7654  let results = (outs
7655    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.
7656
7657@compatibility(numpy)
7658Equivalent to np.linalg.inv
7659@end_compatibility}]>:$output
7660  );
7661
7662  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7663}
7664
7665def TF_MatrixSetDiagOp : TF_Op<"MatrixSetDiag", [NoSideEffect]> {
7666  let summary = [{
7667Returns a batched matrix tensor with new batched diagonal values.
7668  }];
7669
7670  let description = [{
7671Given `input` and `diagonal`, this operation returns a tensor with the
7672same shape and values as `input`, except for the main diagonal of the
7673innermost matrices.  These will be overwritten by the values in `diagonal`.
7674
7675The output is computed as follows:
7676
7677Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has
7678`k` dimensions `[I, J, K, ..., min(M, N)]`.  Then the output is a
7679tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where:
7680
7681  * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`.
7682  * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`.
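
For example, a minimal sketch via the Python wrapper `tf.linalg.set_diag`
(which lowers to an op in this family); the values are hypothetical:

```
input = tf.zeros([3, 3])
diagonal = tf.constant([1., 2., 3.])
tf.linalg.set_diag(input, diagonal)
# ==> [[1., 0., 0.],
#      [0., 2., 0.],
#      [0., 0., 3.]]
```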
7683  }];
7684
7685  let arguments = (ins
7686    Arg<TF_Tensor, [{Rank `k+1`, where `k >= 1`.}]>:$input,
7687    Arg<TF_Tensor, [{Rank `k`, where `k >= 1`.}]>:$diagonal
7688  );
7689
7690  let results = (outs
7691    Res<TF_Tensor, [{Rank `k+1`, with `output.shape = input.shape`.}]>:$output
7692  );
7693
7694  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7695
7696  let hasCanonicalizer = 1;
7697}
7698
7699def TF_MatrixSetDiagV2Op : TF_Op<"MatrixSetDiagV2", [NoSideEffect]> {
7700  let summary = [{
7701Returns a batched matrix tensor with new batched diagonal values.
7702  }];
7703
7704  let description = [{
7705Given `input` and `diagonal`, this operation returns a tensor with the
7706same shape and values as `input`, except for the specified diagonals of the
7707innermost matrices. These will be overwritten by the values in `diagonal`.
7708
7709`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
7710`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
7711Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
7712`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
7713`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.
7715
The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
7717If `k` is scalar or `k[0] == k[1]`:
7718
7719```
7720output[i, j, ..., l, m, n]
7721  = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
7722    input[i, j, ..., l, m, n]              ; otherwise
7723```
7724
7725Otherwise,
7726
7727```
7728output[i, j, ..., l, m, n]
7729  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
7730    input[i, j, ..., l, m, n]                         ; otherwise
7731```
7732where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`.
7733
7734For example:
7735
7736```
7737# The main diagonal.
7738input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
7739                   [7, 7, 7, 7],
7740                   [7, 7, 7, 7]],
7741                  [[7, 7, 7, 7],
7742                   [7, 7, 7, 7],
7743                   [7, 7, 7, 7]]])
7744diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
7745                     [4, 5, 6]])
tf.matrix_set_diag(input, diagonal)
  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
        [7, 2, 7, 7],
        [7, 7, 3, 7]],
       [[4, 7, 7, 7],
        [7, 5, 7, 7],
        [7, 7, 6, 7]]]
7752
7753# A superdiagonal (per batch).
tf.matrix_set_diag(input, diagonal, k = 1)
7755  ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
7756        [7, 7, 2, 7],
7757        [7, 7, 7, 3]],
7758       [[7, 4, 7, 7],
7759        [7, 7, 5, 7],
7760        [7, 7, 7, 6]]]
7761
7762# A band of diagonals.
7763diagonals = np.array([[[1, 2, 3],  # Diagonal shape: (2, 2, 3)
7764                       [4, 5, 0]],
7765                      [[6, 1, 2],
7766                       [3, 4, 0]]])
tf.matrix_set_diag(input, diagonals, k = (-1, 0))
7768  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
7769        [4, 2, 7, 7],
7770        [0, 5, 3, 7]],
7771       [[6, 7, 7, 7],
7772        [3, 1, 7, 7],
7773        [7, 4, 2, 7]]]
7774
7775```
7776  }];
7777
7778  let arguments = (ins
7779    Arg<TF_Tensor, [{Rank `r+1`, where `r >= 1`.}]>:$input,
    Arg<TF_Tensor, [{Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
`r >= 1`.}]>:$diagonal,
7782    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7783diagonal, and negative value means subdiagonals. `k` can be a single integer
7784(for a single diagonal) or a pair of integers specifying the low and high ends
7785of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k
7786  );
7787
7788  let results = (outs
7789    Res<TF_Tensor, [{Rank `r+1`, with `output.shape = input.shape`.}]>:$output
7790  );
7791
7792  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7793
7794  let hasCanonicalizer = 1;
7795}
7796
7797def TF_MatrixSetDiagV3Op : TF_Op<"MatrixSetDiagV3", [NoSideEffect]> {
7798  let summary = [{
7799Returns a batched matrix tensor with new batched diagonal values.
7800  }];
7801
7802  let description = [{
7803Given `input` and `diagonal`, this operation returns a tensor with the
7804same shape and values as `input`, except for the specified diagonals of the
7805innermost matrices. These will be overwritten by the values in `diagonal`.
7806
7807`input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or
7808`k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`.
7809Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`.
7810`num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`.
7811`max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`,
`max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))`.
7813
The output is a tensor of rank `r+1` with dimensions `[I, J, ..., L, M, N]`.
7815If `k` is scalar or `k[0] == k[1]`:
7816
7817```
7818output[i, j, ..., l, m, n]
7819  = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1]
7820    input[i, j, ..., l, m, n]              ; otherwise
7821```
7822
7823Otherwise,
7824
7825```
7826output[i, j, ..., l, m, n]
7827  = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1]
7828    input[i, j, ..., l, m, n]                         ; otherwise
7829```
7830where `d = n - m`, `diag_index = k[1] - d`, and
7831`index_in_diag = n - max(d, 0) + offset`.
7832
7833`offset` is zero except when the alignment of the diagonal is to the right.
7834```
7835offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT}
7836                                           and `d >= 0`) or
7837                                         (`align` in {LEFT_RIGHT, RIGHT_RIGHT}
7838                                           and `d <= 0`)
7839         0                          ; otherwise
7840```
7841where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`.
7842
7843For example:
7844
7845```
7846# The main diagonal.
7847input = np.array([[[7, 7, 7, 7],              # Input shape: (2, 3, 4)
7848                   [7, 7, 7, 7],
7849                   [7, 7, 7, 7]],
7850                  [[7, 7, 7, 7],
7851                   [7, 7, 7, 7],
7852                   [7, 7, 7, 7]]])
7853diagonal = np.array([[1, 2, 3],               # Diagonal shape: (2, 3)
7854                     [4, 5, 6]])
7855tf.matrix_set_diag(input, diagonal)
7856  ==> [[[1, 7, 7, 7],  # Output shape: (2, 3, 4)
7857        [7, 2, 7, 7],
7858        [7, 7, 3, 7]],
7859       [[4, 7, 7, 7],
7860        [7, 5, 7, 7],
7861        [7, 7, 6, 7]]]
7862
7863# A superdiagonal (per batch).
7864tf.matrix_set_diag(input, diagonal, k = 1)
7865  ==> [[[7, 1, 7, 7],  # Output shape: (2, 3, 4)
7866        [7, 7, 2, 7],
7867        [7, 7, 7, 3]],
7868       [[7, 4, 7, 7],
7869        [7, 7, 5, 7],
7870        [7, 7, 7, 6]]]
7871
7872# A band of diagonals.
7873diagonals = np.array([[[0, 9, 1],  # Diagonal shape: (2, 4, 3)
7874                       [6, 5, 8],
7875                       [1, 2, 3],
7876                       [4, 5, 0]],
7877                      [[0, 1, 2],
7878                       [5, 6, 4],
7879                       [6, 1, 2],
7880                       [3, 4, 0]]])
7881tf.matrix_set_diag(input, diagonals, k = (-1, 2))
7882  ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
7883        [4, 2, 5, 1],
7884        [7, 5, 3, 8]],
7885       [[6, 5, 1, 7],
7886        [3, 1, 6, 2],
7887        [7, 4, 2, 4]]]
7888
7889# LEFT_RIGHT alignment.
7890diagonals = np.array([[[9, 1, 0],  # Diagonal shape: (2, 4, 3)
7891                       [6, 5, 8],
7892                       [1, 2, 3],
7893                       [0, 4, 5]],
7894                      [[1, 2, 0],
7895                       [5, 6, 4],
7896                       [6, 1, 2],
7897                       [0, 3, 4]]])
7898tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT")
7899  ==> [[[1, 6, 9, 7],  # Output shape: (2, 3, 4)
7900        [4, 2, 5, 1],
7901        [7, 5, 3, 8]],
7902       [[6, 5, 1, 7],
7903        [3, 1, 6, 2],
7904        [7, 4, 2, 4]]]
7905
7906```
7907  }];
7908
7909  let arguments = (ins
7910    Arg<TF_Tensor, [{Rank `r+1`, where `r >= 1`.}]>:$input,
    Arg<TF_Tensor, [{Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`.
`r >= 1`.}]>:$diagonal,
7913    Arg<TF_Int32Tensor, [{Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main
7914diagonal, and negative value means subdiagonals. `k` can be a single integer
7915(for a single diagonal) or a pair of integers specifying the low and high ends
7916of a matrix band. `k[0]` must not be larger than `k[1]`.}]>:$k,
7917
7918    DefaultValuedAttr<TF_AnyStrAttrOf<["LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"]>, "RIGHT_LEFT">:$align
7919  );
7920
7921  let results = (outs
7922    Res<TF_Tensor, [{Rank `r+1`, with `output.shape = input.shape`.}]>:$output
7923  );
7924
7925  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7926}
7927
7928def TF_MatrixSolveOp : TF_Op<"MatrixSolve", [NoSideEffect]> {
7929  let summary = "Solves systems of linear equations.";
7930
7931  let description = [{
`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
form square matrices. `rhs` is a tensor of shape `[..., M, K]`. The `output` is
a tensor of shape `[..., M, K]`.  If `adjoint` is `False` then each output matrix
7935satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
7936If `adjoint` is `True` then each output matrix satisfies
7937`adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.
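
For example, a minimal sketch via the Python wrapper `tf.linalg.solve` (which is
backed by this op), with hypothetical values:

```
matrix = tf.constant([[2., 0.],
                      [0., 4.]])
rhs = tf.constant([[2.],
                   [8.]])
tf.linalg.solve(matrix, rhs)
# ==> [[1.],
#      [2.]]
```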
7938  }];
7939
7940  let arguments = (ins
7941    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$matrix,
7942    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$rhs,
7943
7944    DefaultValuedAttr<BoolAttr, "false">:$adjoint
7945  );
7946
7947  let results = (outs
7948    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$output
7949  );
7950
7951  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
7952}
7953
7954def TF_MatrixTriangularSolveOp : TF_Op<"MatrixTriangularSolve", [NoSideEffect]> {
7955  let summary = [{
7956Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.
7957  }];
7958
7959  let description = [{
7960`matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
7961square matrices. If `lower` is `True` then the strictly upper triangular part
7962of each inner-most matrix is assumed to be zero and not accessed.
7963If `lower` is False then the strictly lower triangular part of each inner-most
7964matrix is assumed to be zero and not accessed.
7965`rhs` is a tensor of shape `[..., M, N]`.
7966
The output is a tensor of shape `[..., M, N]`. If `adjoint` is
`False` then the innermost matrices in `output` satisfy matrix equations
`matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
If `adjoint` is `True` then the innermost matrices in `output` satisfy
matrix equations
`adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.
7973
Note that the batch shapes of the inputs only need to be broadcastable.
7975
7976Example:
7977```python
7978
7979a = tf.constant([[3,  0,  0,  0],
7980                 [2,  1,  0,  0],
7981                 [1,  0,  1,  0],
7982                 [1,  1,  1,  1]], dtype=tf.float32)
7983
7984b = tf.constant([[4],
7985                 [2],
7986                 [4],
7987                 [2]], dtype=tf.float32)
7988
7989x = tf.linalg.triangular_solve(a, b, lower=True)
7990x
7991# <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
7992# array([[ 1.3333334 ],
7993#        [-0.66666675],
7994#        [ 2.6666665 ],
7995#        [-1.3333331 ]], dtype=float32)>
7996
# In Python 3 one can use `a @ x`.
7998tf.matmul(a, x)
7999# <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
8000# array([[4.       ],
8001#        [2.       ],
8002#        [4.       ],
8003#        [1.9999999]], dtype=float32)>
8004```
8005  }];
8006
8007  let arguments = (ins
8008    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, M]`.}]>:$matrix,
8009    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$rhs,
8010
8011    DefaultValuedAttr<BoolAttr, "true">:$lower,
8012    DefaultValuedAttr<BoolAttr, "false">:$adjoint
8013  );
8014
8015  let results = (outs
8016    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Shape is `[..., M, K]`.}]>:$output
8017  );
8018
8019  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8020}
8021
8022def TF_MaxOp : TF_Op<"Max", [NoSideEffect]> {
8023  let summary = [{
8024Computes the maximum of elements across dimensions of a tensor.
8025  }];
8026
8027  let description = [{
8028Reduces `input` along the dimensions given in `axis`. Unless
8029`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
8030`axis`. If `keep_dims` is true, the reduced dimensions are
8031retained with length 1.
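
For example, a short sketch via the Python wrapper `tf.math.reduce_max` (which
lowers to this op); the values are hypothetical:

```
x = tf.constant([[1, 4],
                 [3, 2]])
tf.math.reduce_max(x, axis=1)                 # ==> [4, 3]
tf.math.reduce_max(x, axis=1, keepdims=True)  # ==> [[4], [3]]
```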
8032  }];
8033
8034  let arguments = (ins
8035    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
8036    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
8037`[-rank(input), rank(input))`.}]>:$reduction_indices,
8038
8039    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
8040  );
8041
8042  let results = (outs
8043    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
8044  );
8045
8046  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8047  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
8048
8049  let builders = [
8050    OpBuilderDAG<(ins "Value":$input, "Value":$reduction_indices,
8051      "BoolAttr":$keep_dims)>
8052  ];
8053}
8054
8055def TF_MaxPoolOp : TF_Op<"MaxPool", [NoSideEffect, TF_FoldOperandsTransposeInterface, TF_LayoutSensitiveInterface]> {
8056  let summary = "Performs max pooling on the input.";
8057
8058  let arguments = (ins
8059    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{4-D input to pool over.}]>:$input,
8060
8061    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
8062    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
8063    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
8064    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
8065    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
8066  );
8067
8068  let results = (outs
8069    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{The max pooled output tensor.}]>:$output
8070  );
8071
8072  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8073
8074  let extraClassDeclaration = [{
8075    // TF_FoldOperandsTransposeInterface:
8076    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
8077    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
8078    LogicalResult FoldOperandsPermutation(ArrayRef<int64_t> permutation);
8079    // TF_LayoutSensitiveInterface:
8080    StringRef GetOptimalLayout(const RuntimeDevices& devices);
8081    LogicalResult UpdateDataFormat(StringRef data_format);
8082  }];
8083}
8084
8085def TF_MaxPool3DOp : TF_Op<"MaxPool3D", [NoSideEffect]> {
8086  let summary = "Performs 3D max pooling on the input.";
8087
8088  let arguments = (ins
8089    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{Shape `[batch, depth, rows, cols, channels]` tensor to pool over.}]>:$input,
8090
8091    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
8092    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
8093    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8094    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
8095  );
8096
8097  let results = (outs
8098    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The max pooled output tensor.}]>:$output
8099  );
8100
8101  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8102}
8103
8104def TF_MaxPool3DGradOp : TF_Op<"MaxPool3DGrad", [NoSideEffect]> {
8105  let summary = "Computes gradients of 3D max pooling function.";
8106
8107  let arguments = (ins
8108    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The original input tensor.}]>:$orig_input,
8109    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{The original output tensor.}]>:$orig_output,
8110    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,
8111
8112    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
8113    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
8114    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8115    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
8116  );
8117
8118  let results = (outs
8119    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$output
8120  );
8121
8122  TF_DerivedOperandTypeAttr TInput = TF_DerivedOperandTypeAttr<0>;
8123  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
8124}
8125
8126def TF_MaxPool3DGradGradOp : TF_Op<"MaxPool3DGradGrad", [NoSideEffect]> {
8127  let summary = "Computes second-order gradients of the maxpooling function.";
8128
8129  let arguments = (ins
8130    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8131    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8132    Arg<TF_IntOrFpTensor, [{Output backprop of shape `[batch, depth, rows, cols, channels]`.}]>:$grad,
8133
8134    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$ksize,
8135    Confined<I64ArrayAttr, [ArrayMinCount<5>]>:$strides,
8136    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8137    DefaultValuedAttr<TF_AnyStrAttrOf<["NDHWC", "NCDHW"]>, "NDHWC">:$data_format
8138  );
8139
8140  let results = (outs
8141    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
8142  );
8143
8144  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8145}
8146
8147def TF_MaxPoolGradOp : TF_Op<"MaxPoolGrad", [NoSideEffect]> {
8148  let summary = "Computes gradients of the maxpooling function.";
8149
8150  let arguments = (ins
8151    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8152    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8153    Arg<TF_IntOrFpTensor, [{4-D.  Gradients w.r.t. the output of `max_pool`.}]>:$grad,
8154
8155    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
8156    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
8157    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
8158    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
8159    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8160  );
8161
8162  let results = (outs
8163    Res<TF_IntOrFpTensor, [{Gradients w.r.t. the input to `max_pool`.}]>:$output
8164  );
8165
8166  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8167
8168  let verifier = [{
8169    return Verify(*this);
8170  }];
8171}
8172
8173def TF_MaxPoolGradGradOp : TF_Op<"MaxPoolGradGrad", [NoSideEffect]> {
8174  let summary = "Computes second-order gradients of the maxpooling function.";
8175
8176  let arguments = (ins
8177    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8178    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8179    Arg<TF_IntOrFpTensor, [{4-D.  Gradients of gradients w.r.t. the input of `max_pool`.}]>:$grad,
8180
8181    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$ksize,
8182    Confined<I64ArrayAttr, [ArrayMinCount<4>]>:$strides,
8183    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8184    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8185  );
8186
8187  let results = (outs
8188    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
8189  );
8190
8191  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8192}
8193
8194def TF_MaxPoolGradGradV2Op : TF_Op<"MaxPoolGradGradV2", [NoSideEffect]> {
8195  let summary = "Computes second-order gradients of the maxpooling function.";
8196
8197  let arguments = (ins
8198    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8199    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8200    Arg<TF_IntOrFpTensor, [{4-D.  Gradients of gradients w.r.t. the input of `max_pool`.}]>:$grad,
8201    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
8202    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
8203input tensor.}]>:$strides,
8204
8205    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8206    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8207  );
8208
8209  let results = (outs
8210    Res<TF_IntOrFpTensor, [{Gradients of gradients w.r.t. the input to `max_pool`.}]>:$output
8211  );
8212
8213  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8214}
8215
8216def TF_MaxPoolGradV2Op : TF_Op<"MaxPoolGradV2", [NoSideEffect]> {
8217  let summary = "Computes gradients of the maxpooling function.";
8218
8219  let arguments = (ins
8220    Arg<TF_IntOrFpTensor, [{The original input tensor.}]>:$orig_input,
8221    Arg<TF_IntOrFpTensor, [{The original output tensor.}]>:$orig_output,
8222    Arg<TF_IntOrFpTensor, [{4-D.  Gradients w.r.t. the output of `max_pool`.}]>:$grad,
8223    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
8224    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
8225input tensor.}]>:$strides,
8226
8227    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8228    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format
8229  );
8230
8231  let results = (outs
8232    Res<TF_IntOrFpTensor, [{Gradients w.r.t. the input to `max_pool`.}]>:$output
8233  );
8234
8235  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8236}
8237
8238def TF_MaxPoolV2Op : TF_Op<"MaxPoolV2", [NoSideEffect]> {
8239  let summary = "Performs max pooling on the input.";
8240
8241  let arguments = (ins
8242    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{4-D input to pool over.}]>:$input,
8243    Arg<TF_Int32Tensor, [{The size of the window for each dimension of the input tensor.}]>:$ksize,
8244    Arg<TF_Int32Tensor, [{The stride of the sliding window for each dimension of the
8245input tensor.}]>:$strides,
8246
8247    TF_AnyStrAttrOf<["SAME", "VALID"]>:$padding,
8248    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
8249  );
8250
8251  let results = (outs
8252    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint8]>, [{The max pooled output tensor.}]>:$output
8253  );
8254
8255  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8256}
8257
8258def TF_MeanOp : TF_Op<"Mean", [NoSideEffect, TF_FoldOperandsTransposeInterface]> {
8259  let summary = "Computes the mean of elements across dimensions of a tensor.";
8260
8261  let description = [{
8262Reduces `input` along the dimensions given in `axis`. Unless
8263`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
8264`axis`. If `keep_dims` is true, the reduced dimensions are
8265retained with length 1.
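
For example, a short sketch via the Python wrapper `tf.math.reduce_mean` (which
lowers to this op); the values are hypothetical:

```
x = tf.constant([[1., 1.],
                 [2., 2.]])
tf.math.reduce_mean(x)     # ==> 1.5
tf.math.reduce_mean(x, 0)  # ==> [1.5, 1.5]
tf.math.reduce_mean(x, 1)  # ==> [1., 2.]
```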
8266  }];
8267
8268  let arguments = (ins
8269    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
8270    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
8271`[-rank(input), rank(input))`.}]>:$reduction_indices,
8272
8273    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
8274  );
8275
8276  let results = (outs
8277    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
8278  );
8279
8280  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8281  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
8282
8283  let extraClassDeclaration = [{
8284    // TF_FoldOperandsTransposeInterface:
8285    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
8286    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {}; }
8287    LogicalResult FoldOperandsPermutation(ArrayRef<int64_t> permutation);
8288  }];
8289}
8290
8291def TF_MergeSummaryOp : TF_Op<"MergeSummary", [NoSideEffect, SameOperandsAndResultType]> {
8292  let summary = "Merges summaries.";
8293
8294  let description = [{
8295This op creates a
8296[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
8297protocol buffer that contains the union of all the values in the input
8298summaries.
8299
8300When the Op is run, it reports an `InvalidArgument` error if multiple values
8301in the summaries to merge use the same tag.
8302  }];
8303
8304  let arguments = (ins
8305    Arg<Variadic<TF_StrTensor>, [{Can be of any shape.  Each must contain serialized `Summary` protocol
8306buffers.}]>:$inputs
8307  );
8308
8309  let results = (outs
8310    Res<TF_StrTensor, [{Scalar. Serialized `Summary` protocol buffer.}]>:$summary
8311  );
8312
8313  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
8314}
8315
8316def TF_MergeV2CheckpointsOp : TF_Op<"MergeV2Checkpoints", []> {
8317  let summary = [{
V2 format specific: merges the metadata files of sharded checkpoints.
8319  }];
8320
8321  let description = [{
The result is one logical checkpoint, with one physical metadata file and renamed
8323data files.
8324
8325Intended for "grouping" multiple checkpoints in a sharded checkpoint setup.
8326
If `delete_old_dirs` is true, the op attempts to recursively delete the dirname of
each path in the input `checkpoint_prefixes`. This is useful when those paths are
non-user-facing temporary locations.
8330  }];
8331
8332  let arguments = (ins
8333    Arg<TF_StrTensor, [{prefixes of V2 checkpoints to merge.}]>:$checkpoint_prefixes,
8334    Arg<TF_StrTensor, [{scalar.  The desired final prefix.  Allowed to be the same
8335as one of the checkpoint_prefixes.}]>:$destination_prefix,
8336
8337    DefaultValuedAttr<BoolAttr, "true">:$delete_old_dirs
8338  );
8339
8340  let results = (outs);
8341}
8342
8343def TF_MinOp : TF_Op<"Min", [NoSideEffect]> {
8344  let summary = [{
8345Computes the minimum of elements across dimensions of a tensor.
8346  }];
8347
8348  let description = [{
8349Reduces `input` along the dimensions given in `axis`. Unless
8350`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
8351`axis`. If `keep_dims` is true, the reduced dimensions are
8352retained with length 1.
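
For example, a short sketch via the Python wrapper `tf.math.reduce_min` (which
lowers to this op); the values are hypothetical:

```
x = tf.constant([[3, -1],
                 [0,  2]])
tf.math.reduce_min(x)          # ==> -1
tf.math.reduce_min(x, axis=0)  # ==> [0, -1]
```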
8353  }];
8354
8355  let arguments = (ins
8356    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
8357    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
8358`[-rank(input), rank(input))`.}]>:$reduction_indices,
8359
8360    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
8361  );
8362
8363  let results = (outs
8364    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint16, TF_Qint32, TF_Qint8, TF_Quint16, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
8365  );
8366
8367  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8368  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
8369}
8370
8371def TF_MinimumOp : TF_Op<"Minimum", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
8372                   WithBroadcastableBinOpBuilder {
8373  let summary = "Returns the min of x and y (i.e. x < y ? x : y) element-wise.";
8374
8375  let description = [{
8376*NOTE*: `Minimum` supports broadcasting. More about broadcasting
8377[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
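
A brief sketch of the broadcasting behavior via the Python wrapper
`tf.math.minimum` (hypothetical values):

```
x = tf.constant([0., 0., 0., 0.])
y = tf.constant([-5., -2., 0., 3.])
tf.math.minimum(x, y)  # ==> [-5., -2., 0., 0.]

# A scalar broadcasts against a vector.
tf.math.minimum(tf.constant(-1.), y)  # ==> [-5., -2., -1., -1.]
```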
8378  }];
8379
8380  let arguments = (ins
8381    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Uint8]>:$x,
8382    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Uint8]>:$y
8383  );
8384
8385  let results = (outs
8386    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Uint8]>:$z
8387  );
8388
8389  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8390}
8391
8392def TF_MirrorPadOp : TF_Op<"MirrorPad", [NoSideEffect, TF_OperandHasRank<1, 2>]> {
8393  let summary = "Pads a tensor with mirrored values.";
8394
8395  let description = [{
This operation pads the `input` with mirrored values according to the `paddings`
8397you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is
8398the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
8399how many values to add before the contents of `input` in that dimension, and
8400`paddings[D, 1]` indicates how many values to add after the contents of `input`
in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater
than `input.dim_size(D)` if `mode` is `SYMMETRIC`, or `input.dim_size(D) - 1` if
`mode` is `REFLECT`.
8404
8405The padded size of each dimension D of the output is:
8406
8407`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
8408
8409For example:
8410
8411```
8412# 't' is [[1, 2, 3], [4, 5, 6]].
# 'paddings' is [[1, 1], [2, 2]].
8414# 'mode' is SYMMETRIC.
8415# rank of 't' is 2.
8416pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2]
8417                      [2, 1, 1, 2, 3, 3, 2]
8418                      [5, 4, 4, 5, 6, 6, 5]
8419                      [5, 4, 4, 5, 6, 6, 5]]
8420```
8421  }];
8422
8423  let arguments = (ins
8424    Arg<TF_Tensor, [{The input tensor to be padded.}]>:$input,
8425    Arg<TF_I32OrI64Tensor, [{A two-column matrix specifying the padding sizes. The number of
8426rows must be the same as the rank of `input`.}]>:$paddings,
8427
8428    TF_AnyStrAttrOf<["REFLECT", "SYMMETRIC"]>:$mode
8429  );
8430
8431  let results = (outs
8432    Res<TF_Tensor, [{The padded tensor.}]>:$output
8433  );
8434
8435  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8436  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
8437}
8438
8439def TF_MirrorPadGradOp : TF_Op<"MirrorPadGrad", [NoSideEffect, TF_OperandHasRank<1, 2>]> {
8440  let summary = [{
8441Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor.
8442  }];
8443
8444  let description = [{
8445This operation folds the padded areas of `input` by `MirrorPad` according to the
8446`paddings` you specify. `paddings` must be the same as `paddings` argument
8447given to the corresponding `MirrorPad` op.
8448
8449The folded size of each dimension D of the output is:
8450
8451`input.dim_size(D) - paddings(D, 0) - paddings(D, 1)`
8452
8453For example:
8454
8455```
8456# 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]].
# 'paddings' is [[0, 1], [0, 1]].
8458# 'mode' is SYMMETRIC.
8459# rank of 't' is 2.
8460pad(t, paddings) ==> [[ 1,  5]
8461                      [11, 28]]
8462```
8463  }];
8464
8465  let arguments = (ins
8466    Arg<TF_Tensor, [{The input tensor to be folded.}]>:$input,
8467    Arg<TF_I32OrI64Tensor, [{A two-column matrix specifying the padding sizes. The number of
8468rows must be the same as the rank of `input`.}]>:$paddings,
8469
8470    TF_AnyStrAttrOf<["REFLECT", "SYMMETRIC"]>:$mode
8471  );
8472
8473  let results = (outs
8474    Res<TF_Tensor, [{The folded tensor.}]>:$output
8475  );
8476
8477  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8478  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
8479}
8480
8481def TF_MlirLocalVarOp : TF_Op<"MlirLocalVarOp", []> {
8482  let summary = "Creates a handle to an in-scope variable.";
8483
8484  let description = [{
8485Used by internal passes for temporary representation of local state, which will
8486be eventually removed.
8487  }];
8488
8489  let arguments = (ins);
8490
8491  let results = (outs
8492    Res<TF_ResourceTensor, "", [TF_VariableAlloc]>:$resource
8493  );
8494}
8495
8496def TF_MlirPassthroughOp : TF_Op<"MlirPassthroughOp", [NoSideEffect]> {
8497  let summary = [{
8498Wraps an arbitrary MLIR computation expressed as a module with a main() function.
8499  }];
8500
8501  let description = [{
This operation does not have an associated kernel and is not intended to be
executed in a regular TensorFlow session. Instead it is intended to be used for
testing or for special cases where a user intends to pass a custom MLIR computation
through a TensorFlow graph with the intent of having custom tooling process it
downstream (when targeting a different environment, like TensorFlow Lite for
example).
The MLIR module is expected to have a main() function that will be used as an
entry point. The inputs to the operation will be passed as arguments to the
main() function, and the returned values of the main() function are mapped to the
outputs.
8512Example usage:
8513
8514```
8515import tensorflow as tf
8516from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op
8517
mlir_module = '''
func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {
   %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>
   return %add : tensor<10x10xf32>
}
'''
8524
8525@tf.function
8526def foo(x, y):
8527  return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32])
8528
8529graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), tf.TensorSpec([10], tf.float32)).graph.as_graph_def()
8530```
8531  }];
8532
8533  let arguments = (ins
8534    Variadic<TF_Tensor>:$inputs,
8535
8536    StrAttr:$mlir_module
8537  );
8538
8539  let results = (outs
8540    Variadic<TF_Tensor>:$outputs
8541  );
8542
8543  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
8544  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
8545}
8546
8547def TF_ModOp : TF_Op<"Mod", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
8548               WithBroadcastableBinOpBuilder {
8549  let summary = [{
Returns element-wise remainder of division.
8551  }];
8552
8553  let description = [{
This emulates C semantics in that the result is consistent with a truncating divide. E.g.
8555`tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.
8556
8557*NOTE*: `Mod` supports broadcasting. More about broadcasting
8558[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
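
For example, a minimal sketch via the raw-op binding (assuming eager execution):

```
tf.raw_ops.Mod(x=tf.constant(7), y=tf.constant(5))   # ==> 2
tf.raw_ops.Mod(x=tf.constant(-7), y=tf.constant(5))  # ==> -2 (sign follows x, as in C)
```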
8559  }];
8560
8561  let arguments = (ins
8562    TF_FpOrI32OrI64Tensor:$x,
8563    TF_FpOrI32OrI64Tensor:$y
8564  );
8565
8566  let results = (outs
8567    TF_FpOrI32OrI64Tensor:$z
8568  );
8569
8570  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8571}
8572
8573def TF_MulOp : TF_Op<"Mul", [Commutative, NoSideEffect, ResultsBroadcastableShape, TF_CwiseBinary, TF_SameOperandsAndResultElementTypeResolveRef]>,
8574               WithBroadcastableBinOpBuilder {
8575  let summary = "Returns x * y element-wise.";
8576
8577  let description = [{
8578*NOTE*: `Multiply` supports broadcasting. More about broadcasting
8579[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
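
A brief sketch of the broadcasting behavior via the Python wrapper
`tf.math.multiply` (hypothetical values):

```
x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])
y = tf.constant([10., 100., 1000.])  # broadcast across both rows of `x`
tf.math.multiply(x, y)
# ==> [[  10.,  200., 3000.],
#      [  40.,  500., 6000.]]
```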
8580  }];
8581
8582  let arguments = (ins
8583    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
8584    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
8585  );
8586
8587  let results = (outs
8588    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
8589  );
8590
8591  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8592
8593  let hasFolder = 1;
8594}
8595
8596def TF_MulNoNanOp : TF_Op<"MulNoNan", [NoSideEffect, ResultsBroadcastableShape]>,
8597                    WithBroadcastableBinOpBuilder {
8598  let summary = [{
Returns x * y element-wise. Returns zero if y is zero, even if x is infinite or NaN.
8600  }];
8601
8602  let description = [{
8603*NOTE*: `MulNoNan` supports broadcasting. More about broadcasting
8604[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
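
A minimal sketch via the Python wrapper `tf.math.multiply_no_nan` (hypothetical
values):

```
x = tf.constant([float("inf"), 1., float("nan")])
y = tf.constant([0., 2., 0.])
tf.math.multiply_no_nan(x, y)  # ==> [0., 2., 0.]
```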
8605  }];
8606
8607  let arguments = (ins
8608    TF_FpOrComplexTensor:$x,
8609    TF_FpOrComplexTensor:$y
8610  );
8611
8612  let results = (outs
8613    TF_FpOrComplexTensor:$z
8614  );
8615
8616  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8617}
8618
8619def TF_MultiDeviceIteratorOp : TF_Op<"MultiDeviceIterator", []> {
8620  let summary = "Creates a MultiDeviceIterator resource.";
8621
8622  let arguments = (ins
8623    Confined<StrArrayAttr, [ArrayMinCount<1>]>:$devices,
8624    StrAttr:$shared_name,
8625    StrAttr:$container,
8626    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
8627    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
8628  );
8629
8630  let results = (outs
8631    Res<TF_ResourceTensor, [{Handle to the resource created.}], [TF_DatasetIteratorAlloc]>:$handle
8632  );
8633}
8634
8635def TF_MultiDeviceIteratorFromStringHandleOp : TF_Op<"MultiDeviceIteratorFromStringHandle", []> {
8636  let summary = [{
8637Generates a MultiDeviceIterator resource from its provided string handle.
8638  }];
8639
8640  let arguments = (ins
8641    Arg<TF_StrTensor, [{String representing the resource.}]>:$string_handle,
8642
8643    DefaultValuedAttr<TypeArrayAttr, "{}">:$output_types,
8644    DefaultValuedAttr<TF_ShapeAttrArray, "{}">:$output_shapes
8645  );
8646
8647  let results = (outs
8648    Res<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorAlloc]>:$multi_device_iterator
8649  );
8650}
8651
8652def TF_MultiDeviceIteratorGetNextFromShardOp : TF_Op<"MultiDeviceIteratorGetNextFromShard", []> {
8653  let summary = "Gets next element for the provided shard number.";
8654
8655  let arguments = (ins
8656    Arg<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorRead, TF_DatasetIteratorWrite]>:$multi_device_iterator,
8657    Arg<TF_Int32Tensor, [{Integer representing which shard to fetch data for.}]>:$shard_num,
8658    Arg<TF_Int64Tensor, [{Which incarnation of the MultiDeviceIterator is running.}]>:$incarnation_id
8659  );
8660
8661  let results = (outs
8662    Res<Variadic<TF_Tensor>, [{Result of the get_next on the dataset.}]>:$components
8663  );
8664
8665  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
8666  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
8667}
8668
8669def TF_MultiDeviceIteratorInitOp : TF_Op<"MultiDeviceIteratorInit", []> {
8670  let summary = "Initializes the multi device iterator with the given dataset.";
8671
8672  let arguments = (ins
8673    Arg<TF_VariantTensor, [{Dataset to be iterated upon.}]>:$dataset,
8674    Arg<TF_ResourceTensor, [{A MultiDeviceIteratorResource.}], [TF_DatasetIteratorWrite]>:$multi_device_iterator,
8675    Arg<TF_Int64Tensor, [{The maximum size of the host side per device buffer to keep.}]>:$max_buffer_size
8676  );
8677
8678  let results = (outs
8679    Res<TF_Int64Tensor, [{An int64 indicating which incarnation of the MultiDeviceIterator
8680is running.}]>:$incarnation_id
8681  );
8682}
8683
8684def TF_MultiDeviceIteratorToStringHandleOp : TF_Op<"MultiDeviceIteratorToStringHandle", []> {
8685  let summary = "Produces a string handle for the given MultiDeviceIterator.";
8686
8687  let arguments = (ins
8688    Arg<TF_ResourceTensor, [{A MultiDeviceIterator resource.}], [TF_DatasetIteratorRead]>:$multi_device_iterator
8689  );
8690
8691  let results = (outs
8692    Res<TF_StrTensor, [{A string representing the resource.}]>:$string_handle
8693  );
8694}
8695
8696def TF_MultinomialOp : TF_Op<"Multinomial", [TF_CannotDuplicate]> {
8697  let summary = "Draws samples from a multinomial distribution.";
8698
8699  let arguments = (ins
8700    Arg<TF_IntOrFpTensor, [{2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
8701represents the unnormalized log probabilities for all classes.}]>:$logits,
8702    Arg<TF_Int32Tensor, [{0-D.  Number of independent samples to draw for each row slice.}]>:$num_samples,
8703
8704    DefaultValuedAttr<I64Attr, "0">:$seed,
8705    DefaultValuedAttr<I64Attr, "0">:$seed2
8706  );
8707
8708  let results = (outs
8709    Res<TF_I32OrI64Tensor, [{2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
8710contains the drawn class labels with range `[0, num_classes)`.}]>:$output
8711  );
8712
8713  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8714  TF_DerivedResultTypeAttr output_dtype = TF_DerivedResultTypeAttr<0>;
8715}
8716
8717def TF_MutableDenseHashTableV2Op : TF_Op<"MutableDenseHashTableV2", []> {
8718  let summary = [{
8719Creates an empty hash table that uses tensors as the backing store.
8720  }];
8721
8722  let description = [{
8723It uses "open addressing" with quadratic reprobing to resolve
8724collisions.
8725
8726This op creates a mutable hash table, specifying the type of its keys and
8727values. Each value must be a scalar. Data can be inserted into the table using
8728the insert operations. It does not support the initialization operation.
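
A sketch using the Python-level wrapper `tf.lookup.experimental.DenseHashTable`,
which is backed by this op; the keys, values, and sentinel choices below are
hypothetical:

```
table = tf.lookup.experimental.DenseHashTable(
    key_dtype=tf.int64, value_dtype=tf.int64, default_value=-1,
    empty_key=0, deleted_key=-1)  # sentinels must never appear as real keys
table.insert(tf.constant([1, 2], tf.int64), tf.constant([10, 20], tf.int64))
table.lookup(tf.constant([1, 3], tf.int64))  # ==> [10, -1]
```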
8729  }];
8730
8731  let arguments = (ins
8732    Arg<TF_Tensor, [{The key used to represent empty key buckets internally. Must not
8733be used in insert or lookup operations.}]>:$empty_key,
8734    TF_Tensor:$deleted_key,
8735
8736    StrAttr:$container,
8737    StrAttr:$shared_name,
8738    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
8739    TypeAttr:$value_dtype,
8740    DefaultValuedAttr<TF_ShapeAttr, "llvm::ArrayRef<int64_t>({})">:$value_shape,
8741    DefaultValuedAttr<I64Attr, "131072">:$initial_num_buckets,
8742    DefaultValuedAttr<F32Attr, "0.8f">:$max_load_factor
8743  );
8744
8745  let results = (outs
8746    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
8747  );
8748
8749  TF_DerivedOperandTypeAttr key_dtype = TF_DerivedOperandTypeAttr<0>;
8750}
8751
8752def TF_MutableHashTableOfTensorsV2Op : TF_Op<"MutableHashTableOfTensorsV2", []> {
8753  let summary = "Creates an empty hash table.";
8754
8755  let description = [{
8756This op creates a mutable hash table, specifying the type of its keys and
8757values. Each value must be a vector. Data can be inserted into the table using
8758the insert operations. It does not support the initialization operation.
8759  }];
8760
8761  let arguments = (ins
8762    StrAttr:$container,
8763    StrAttr:$shared_name,
8764    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
8765    TypeAttr:$key_dtype,
8766    TypeAttr:$value_dtype,
8767    DefaultValuedAttr<TF_ShapeAttr, "llvm::ArrayRef<int64_t>({})">:$value_shape
8768  );
8769
8770  let results = (outs
8771    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
8772  );
8773}
8774
8775def TF_MutableHashTableV2Op : TF_Op<"MutableHashTableV2", []> {
8776  let summary = "Creates an empty hash table.";
8777
8778  let description = [{
8779This op creates a mutable hash table, specifying the type of its keys and
8780values. Each value must be a scalar. Data can be inserted into the table using
8781the insert operations. It does not support the initialization operation.
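
A sketch using the Python-level wrapper `tf.lookup.experimental.MutableHashTable`,
which is backed by this op; the keys and values are hypothetical:

```
table = tf.lookup.experimental.MutableHashTable(
    key_dtype=tf.string, value_dtype=tf.int64, default_value=-1)
table.insert(tf.constant(["a", "b"]), tf.constant([1, 2], tf.int64))
table.lookup(tf.constant(["a", "c"]))  # ==> [1, -1]
```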
8782  }];
8783
8784  let arguments = (ins
8785    StrAttr:$container,
8786    StrAttr:$shared_name,
8787    DefaultValuedAttr<BoolAttr, "false">:$use_node_name_sharing,
8788    TypeAttr:$key_dtype,
8789    TypeAttr:$value_dtype
8790  );
8791
8792  let results = (outs
8793    Res<TF_ResourceTensor, [{Handle to a table.}], [TF_LookupTableAlloc]>:$table_handle
8794  );
8795}
8796
8797def TF_NdtriOp : TF_Op<"Ndtri", [NoSideEffect]> {
8798  let summary = "";
8799
8800  let arguments = (ins
8801    TF_FloatTensor:$x
8802  );
8803
8804  let results = (outs
8805    TF_FloatTensor:$y
8806  );
8807
8808  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8809}
8810
8811def TF_NegOp : TF_Op<"Neg", [Involution, NoSideEffect, SameOperandsAndResultType, TF_CwiseUnary]> {
8812  let summary = "Computes numerical negative value element-wise.";
8813
8814  let description = [{
8815I.e., \\(y = -x\\).
8816  }];
8817
8818  let arguments = (ins
8819    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
8820  );
8821
8822  let results = (outs
8823    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
8824  );
8825
8826  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8827}
8828
8829def TF_NextAfterOp : TF_Op<"NextAfter", [NoSideEffect, ResultsBroadcastableShape]>,
8830                     WithBroadcastableBinOpBuilder {
8831  let summary = [{
8832Returns the next representable value of `x1` in the direction of `x2`, element-wise.
8833  }];
8834
8835  let description = [{
8836This operation returns the same result as the C++ std::nextafter function.
8837
8838It can also return a subnormal number.
8839
8840@compatibility(cpp)
8841Equivalent to C++ std::nextafter function.
8842@end_compatibility
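
For example, a one-line sketch via the Python wrapper `tf.math.nextafter`:

```
tf.math.nextafter(1., 2.)  # ==> 1.0000001 (next float32 above 1.0)
tf.math.nextafter(1., 0.)  # ==> 0.99999994 (next float32 below 1.0)
```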
8843  }];
8844
8845  let arguments = (ins
8846    TF_F32OrF64Tensor:$x1,
8847    TF_F32OrF64Tensor:$x2
8848  );
8849
8850  let results = (outs
8851    TF_F32OrF64Tensor:$output
8852  );
8853
8854  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8855}
8856
8857def TF_NoOp : TF_Op<"NoOp", [NoSideEffect]> {
8858  let summary = "Does nothing. Only useful as a placeholder for control edges.";
8859
8860  let arguments = (ins);
8861
8862  let results = (outs);
8863}
8864
8865def TF_NonMaxSuppressionV3Op : TF_Op<"NonMaxSuppressionV3", [NoSideEffect]> {
8866  let summary = [{
Greedily selects a subset of bounding boxes in descending order of score.
8868  }];
8869
8870  let description = [{
8871pruning away boxes that have high intersection-over-union (IOU) overlap
8872with previously selected boxes.  Bounding boxes with score less than
8873`score_threshold` are removed.  Bounding boxes are supplied as
8874[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
8875diagonal pair of box corners and the coordinates can be provided as normalized
8876(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
8877is agnostic to where the origin is in the coordinate system and more
8878generally is invariant to orthogonal transformations and translations
8879of the coordinate system; thus translating or reflections of the coordinate
8880system result in the same boxes being selected by the algorithm.
8881The output of this operation is a set of integers indexing into the input
8882collection of bounding boxes representing the selected boxes.  The bounding
8883box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:

```
  selected_indices = tf.image.non_max_suppression(
      boxes, scores, max_output_size, iou_threshold, score_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
```
8888  }];
8889
8890  let arguments = (ins
8891    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
8892    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
8893score corresponding to each box (each row of boxes).}]>:$scores,
8894    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
8895boxes to be selected by non max suppression.}]>:$max_output_size,
8896    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
8897boxes overlap too much with respect to IOU.}]>:$iou_threshold,
8898    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
8899boxes based on score.}]>:$score_threshold
8900  );
8901
8902  let results = (outs
8903    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
8904indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices
8905  );
8906
8907  TF_DerivedOperandTypeAttr T_threshold = TF_DerivedOperandTypeAttr<3>;
8908  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8909
8910  let hasCanonicalizer = 1;
8911}
8912
8913def TF_NonMaxSuppressionV4Op : TF_Op<"NonMaxSuppressionV4", [NoSideEffect]> {
8914  let summary = [{
8915Greedily selects a subset of bounding boxes in descending order of score,
8916  }];
8917
8918  let description = [{
8919pruning away boxes that have high intersection-over-union (IOU) overlap
8920with previously selected boxes.  Bounding boxes with score less than
8921`score_threshold` are removed.  Bounding boxes are supplied as
8922[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
8923diagonal pair of box corners and the coordinates can be provided as normalized
8924(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
8925is agnostic to where the origin is in the coordinate system and more
8926generally is invariant to orthogonal transformations and translations
of the coordinate system; thus translations or reflections of the coordinate
8928system result in the same boxes being selected by the algorithm.
8929The output of this operation is a set of integers indexing into the input
8930collection of bounding boxes representing the selected boxes.  The bounding
8931box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:

```
  selected_indices = tf.image.non_max_suppression(
      boxes, scores, max_output_size, iou_threshold, score_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
```
8936  }];
8937
8938  let arguments = (ins
8939    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
8940    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
8941score corresponding to each box (each row of boxes).}]>:$scores,
8942    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
8943boxes to be selected by non max suppression.}]>:$max_output_size,
8944    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
8945boxes overlap too much with respect to IOU.}]>:$iou_threshold,
8946    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
8947boxes based on score.}]>:$score_threshold,
8948
8949    DefaultValuedAttr<BoolAttr, "false">:$pad_to_max_output_size
8950  );
8951
8952  let results = (outs
8953    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
8954indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices,
8955    Res<TF_Int32Tensor, [{A 0-D integer tensor representing the number of valid elements in
8956`selected_indices`, with the valid elements appearing first.}]>:$valid_outputs
8957  );
8958
8959  TF_DerivedOperandTypeAttr T_threshold = TF_DerivedOperandTypeAttr<3>;
8960  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
8961}
8962
8963def TF_NonMaxSuppressionV5Op : TF_Op<"NonMaxSuppressionV5", [NoSideEffect]> {
8964  let summary = [{
8965Greedily selects a subset of bounding boxes in descending order of score,
8966  }];
8967
8968  let description = [{
8969pruning away boxes that have high intersection-over-union (IOU) overlap
8970with previously selected boxes.  Bounding boxes with score less than
8971`score_threshold` are removed.  Bounding boxes are supplied as
8972[y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any
8973diagonal pair of box corners and the coordinates can be provided as normalized
8974(i.e., lying in the interval [0, 1]) or absolute.  Note that this algorithm
8975is agnostic to where the origin is in the coordinate system and more
8976generally is invariant to orthogonal transformations and translations
of the coordinate system; thus translations or reflections of the coordinate
8978system result in the same boxes being selected by the algorithm.
8979The output of this operation is a set of integers indexing into the input
8980collection of bounding boxes representing the selected boxes.  The bounding
8981box coordinates corresponding to the selected indices can then be obtained
using the `tf.gather` operation.  For example:

```
  selected_indices = tf.image.non_max_suppression(
      boxes, scores, max_output_size, iou_threshold, score_threshold)
  selected_boxes = tf.gather(boxes, selected_indices)
```

8986This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f.
8987Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
8988of other overlapping boxes instead of directly causing them to be pruned.
8989To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be
8990larger than 0.
8991  }];
8992
8993  let arguments = (ins
8994    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 2-D float tensor of shape `[num_boxes, 4]`.}]>:$boxes,
8995    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[num_boxes]` representing a single
8996score corresponding to each box (each row of boxes).}]>:$scores,
8997    Arg<TF_Int32Tensor, [{A scalar integer tensor representing the maximum number of
8998boxes to be selected by non max suppression.}]>:$max_output_size,
8999    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding whether
9000boxes overlap too much with respect to IOU.}]>:$iou_threshold,
9001    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the threshold for deciding when to remove
9002boxes based on score.}]>:$score_threshold,
9003    Arg<TensorOf<[TF_Float16, TF_Float32]>, [{A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et
9004al (c.f. https://arxiv.org/abs/1704.04503).  When `soft_nms_sigma=0.0` (which
9005is default), we fall back to standard (hard) NMS.}]>:$soft_nms_sigma,
9006
9007    DefaultValuedAttr<BoolAttr, "false">:$pad_to_max_output_size
9008  );
9009
9010  let results = (outs
9011    Res<TF_Int32Tensor, [{A 1-D integer tensor of shape `[M]` representing the selected
9012indices from the boxes tensor, where `M <= max_output_size`.}]>:$selected_indices,
9013    Res<TensorOf<[TF_Float16, TF_Float32]>, [{A 1-D float tensor of shape `[M]` representing the corresponding
9014scores for each selected box, where `M <= max_output_size`.  Scores only differ
9015from corresponding input scores when using Soft NMS (i.e. when
9016`soft_nms_sigma>0`)}]>:$selected_scores,
9017    Res<TF_Int32Tensor, [{A 0-D integer tensor representing the number of valid elements in
9018`selected_indices`, with the valid elements appearing first.}]>:$valid_outputs
9019  );
9020
9021  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9022}
9023
9024def TF_NotEqualOp : TF_Op<"NotEqual", [Commutative, NoSideEffect]> {
9025  let summary = "Returns the truth value of (x != y) element-wise.";
9026
9027  let description = [{
9028*NOTE*: `NotEqual` supports broadcasting. More about broadcasting
9029[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
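
For example (an illustrative sketch; `tf.not_equal` is assumed to lower to
this op, with the scalar `2` broadcast against `x`):

```python
import tensorflow as tf

x = tf.constant([1, 2, 3])
tf.not_equal(x, 2)  # ==> [True, False, True]
```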
9030  }];
9031
9032  let arguments = (ins
9033    TF_Tensor:$x,
9034    TF_Tensor:$y,
9035
9036    DefaultValuedAttr<BoolAttr, "true">:$incompatible_shape_error
9037  );
9038
9039  let results = (outs
9040    TF_BoolTensor:$z
9041  );
9042
9043  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9044
9045  let builders = [
9046    OpBuilderDAG<(ins "Value":$x, "Value":$y,
9047      "BoolAttr":$incompatible_shape_error)>
9048  ];
9049
9050  let verifier = [{
9051    return Verify(*this);
9052  }];
9053}
9054
9055def TF_OneHotOp : TF_Op<"OneHot", [NoSideEffect]> {
9056  let summary = "Returns a one-hot tensor.";
9057
9058  let description = [{
9059The locations represented by indices in `indices` take value `on_value`,
9060while all other locations take value `off_value`.
9061
If the input `indices` is rank `N`, the output will have rank `N+1`.
9063The new axis is created at dimension `axis` (default: the new axis is
9064appended at the end).
9065
9066If `indices` is a scalar the output shape will be a vector of length `depth`.
9067
9068If `indices` is a vector of length `features`, the output shape will be:
9069```
9070  features x depth if axis == -1
9071  depth x features if axis == 0
9072```
9073
9074If `indices` is a matrix (batch) with shape `[batch, features]`,
9075the output shape will be:
9076```
9077  batch x features x depth if axis == -1
9078  batch x depth x features if axis == 1
9079  depth x batch x features if axis == 0
9080```
9081
9082
9083Examples
9084=========
9085
9086Suppose that
9087```
9088  indices = [0, 2, -1, 1]
9089  depth = 3
9090  on_value = 5.0
9091  off_value = 0.0
9092  axis = -1
9093```
9094
9095Then output is `[4 x 3]`:
9096```
9097output =
9098  [5.0 0.0 0.0]  // one_hot(0)
9099  [0.0 0.0 5.0]  // one_hot(2)
9100  [0.0 0.0 0.0]  // one_hot(-1)
9101  [0.0 5.0 0.0]  // one_hot(1)
9102```
9103
9104Suppose that
9105```
9106  indices = [0, 2, -1, 1]
9107  depth = 3
9108  on_value = 0.0
9109  off_value = 3.0
9110  axis = 0
9111```
9112
9113Then output is `[3 x 4]`:
9114```
9115output =
9116  [0.0 3.0 3.0 3.0]
9117  [3.0 3.0 3.0 0.0]
  [3.0 0.0 3.0 3.0]
9120//  ^                one_hot(0)
9121//      ^            one_hot(2)
9122//          ^        one_hot(-1)
9123//              ^    one_hot(1)
9124```
9125
9126Suppose that
9127```
9128  indices = [[0, 2], [1, -1]]
9129  depth = 3
9130  on_value = 1.0
9131  off_value = 0.0
9132  axis = -1
9133```
9134
9135Then output is `[2 x 2 x 3]`:
9136```
9137output =
9138  [
9139    [1.0, 0.0, 0.0]  // one_hot(0)
9140    [0.0, 0.0, 1.0]  // one_hot(2)
9141  ][
9142    [0.0, 1.0, 0.0]  // one_hot(1)
9143    [0.0, 0.0, 0.0]  // one_hot(-1)
9144  ]
9145```
9146  }];
9147
9148  let arguments = (ins
9149    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint8]>, [{A tensor of indices.}]>:$indices,
9150    Arg<TF_Int32Tensor, [{A scalar defining the depth of the one hot dimension.}]>:$depth,
9151    Arg<TF_Tensor, [{A scalar defining the value to fill in output when `indices[j] = i`.}]>:$on_value,
9152    Arg<TF_Tensor, [{A scalar defining the value to fill in output when `indices[j] != i`.}]>:$off_value,
9153
9154    DefaultValuedAttr<I64Attr, "-1">:$axis
9155  );
9156
9157  let results = (outs
9158    Res<TF_Tensor, [{The one-hot tensor.}]>:$output
9159  );
9160
9161  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
9162  TF_DerivedOperandTypeAttr TI = TF_DerivedOperandTypeAttr<0>;
9163
9164  let builders = [
9165    OpBuilderDAG<(ins "Value":$indices, "Value":$depth, "Value":$on_value,
9166      "Value":$off_value, "IntegerAttr":$axis)>
9167  ];
9168
9169  let verifier = [{
9170    return Verify(*this);
9171  }];
9172}
9173
9174def TF_OneShotIteratorOp : TF_Op<"OneShotIterator", []> {
9175  let summary = [{
9176Makes a "one-shot" iterator that can be iterated only once.
9177  }];
9178
9179  let description = [{
9180A one-shot iterator bundles the logic for defining the dataset and
9181the state of the iterator in a single op, which allows simple input
9182pipelines to be defined without an additional initialization
9183("MakeIterator") step.
9184
9185One-shot iterators have the following limitations:
9186
9187* They do not support parameterization: all logic for creating the underlying
9188  dataset must be bundled in the `dataset_factory` function.
9189* They are not resettable. Once a one-shot iterator reaches the end of its
9190  underlying dataset, subsequent "IteratorGetNext" operations on that
9191  iterator will always produce an `OutOfRange` error.
9192
9193For greater flexibility, use "Iterator" and "MakeIterator" to define
9194an iterator using an arbitrary subgraph, which may capture tensors
9195(including fed values) as parameters, and which may be reset multiple
9196times by rerunning "MakeIterator".
9197  }];
9198
9199  let arguments = (ins
9200    SymbolRefAttr:$dataset_factory,
9201    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
9202    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
9203    StrAttr:$container,
9204    StrAttr:$shared_name
9205  );
9206
9207  let results = (outs
9208    Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to an "IteratorGetNext"
9209op.}], [TF_DatasetIteratorAlloc]>:$handle
9210  );
9211}
9212
9213def TF_OnesLikeOp : TF_Op<"OnesLike", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
9214  let summary = "Returns a tensor of ones with the same shape and type as x.";
9215
9216  let arguments = (ins
9217    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{a tensor of type T.}]>:$x
9218  );
9219
9220  let results = (outs
9221    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{a tensor of the same shape and type as x but filled with ones.}]>:$y
9222  );
9223
9224  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9225}
9226
9227def TF_OptionalGetValueOp : TF_Op<"OptionalGetValue", [NoSideEffect]> {
9228  let summary = [{
9229Returns the value stored in an Optional variant or raises an error if none exists.
9230  }];
9231
9232  let arguments = (ins
9233    TF_VariantTensor:$optional
9234  );
9235
9236  let results = (outs
9237    Variadic<TF_Tensor>:$components
9238  );
9239
9240  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
9241  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
9242}
9243
9244def TF_OptionalHasValueOp : TF_Op<"OptionalHasValue", [NoSideEffect]> {
9245  let summary = [{
9246Returns true if and only if the given Optional variant has a value.
9247  }];
9248
9249  let arguments = (ins
9250    TF_VariantTensor:$optional
9251  );
9252
9253  let results = (outs
9254    TF_BoolTensor:$has_value
9255  );
9256}
9257
9258def TF_OutfeedEnqueueTupleOp : TF_Op<"OutfeedEnqueueTuple", []> {
9259  let summary = "Enqueue multiple Tensor values on the computation outfeed.";
9260
9261  let arguments = (ins
9262    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be inserted into the outfeed queue as an
9263XLA tuple.}]>:$inputs
9264  );
9265
9266  let results = (outs);
9267
9268  TF_DerivedOperandTypeListAttr dtypes = TF_DerivedOperandTypeListAttr<0>;
9269}
9270
9271def TF_PackOp : TF_Op<"Pack", [NoSideEffect]> {
9272  let summary = [{
9273Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor.
9274  }];
9275
9276  let description = [{
9277Packs the `N` tensors in `values` into a tensor with rank one higher than each
9278tensor in `values`, by packing them along the `axis` dimension.
9279Given a list of tensors of shape `(A, B, C)`;
9280
9281if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`.
9282if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`.
9283Etc.
9284
9285For example:
9286
9287```
9288# 'x' is [1, 4]
9289# 'y' is [2, 5]
9290# 'z' is [3, 6]
9291pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]]  # Pack along first dim.
9292pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]]
9293```
9294
9295This is the opposite of `unpack`.
9296  }];
9297
9298  let arguments = (ins
9299    Arg<Variadic<TF_Tensor>, [{Must be of same shape and type.}]>:$values,
9300
9301    DefaultValuedAttr<I64Attr, "0">:$axis
9302  );
9303
9304  let results = (outs
9305    Res<TF_Tensor, [{The packed tensor.}]>:$output
9306  );
9307
9308  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9309  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
9310
9311  let verifier = [{
9312    return Verify(*this);
9313  }];
9314
9315  let hasFolder = 1;
9316}
9317
9318def TF_PadOp : TF_Op<"Pad", [NoSideEffect, TF_FoldOperandsTransposeInterface, TF_OperandHasRank<1, 2>]> {
9319  let summary = "Pads a tensor with zeros.";
9320
9321  let description = [{
This operation pads `input` with zeros according to the `paddings` you
9323specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the
9324rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
9325how many zeros to add before the contents of `input` in that dimension, and
9326`paddings[D, 1]` indicates how many zeros to add after the contents of `input`
9327in that dimension.
9328
9329The padded size of each dimension D of the output is:
9330
9331`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
9332
9333For example:
9334
9335```
9336# 't' is [[1, 1], [2, 2]]
9337# 'paddings' is [[1, 1], [2, 2]]
9338# rank of 't' is 2
9339pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0]
9340                      [0, 0, 1, 1, 0, 0]
9341                      [0, 0, 2, 2, 0, 0]
9342                      [0, 0, 0, 0, 0, 0]]
9343```
9344  }];
9345
9346  let arguments = (ins
9347    TF_Tensor:$input,
9348    TF_I32OrI64Tensor:$paddings
9349  );
9350
9351  let results = (outs
9352    TF_Tensor:$output
9353  );
9354
9355  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9356  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
9357
9358  let extraClassDeclaration = [{
9359    // TF_FoldOperandsTransposeInterface:
9360    SmallVector<unsigned, 4> GetLayoutDependentArgs() { return {0}; }
9361    SmallVector<unsigned, 4> GetLayoutDependentResults() { return {0}; }
9362    LogicalResult FoldOperandsPermutation(ArrayRef<int64_t> permutation);
9363  }];
9364}
9365
9366def TF_PadV2Op : TF_Op<"PadV2", [NoSideEffect, TF_OperandHasRank<1, 2>]> {
9367  let summary = "Pads a tensor.";
9368
9369  let description = [{
9370This operation pads `input` according to the `paddings` and `constant_values`
9371you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is
9372the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates
9373how many padding values to add before the contents of `input` in that dimension,
9374and `paddings[D, 1]` indicates how many padding values to add after the contents
9375of `input` in that dimension. `constant_values` is a scalar tensor of the same
9376type as `input` that indicates the value to use for padding `input`.
9377
9378The padded size of each dimension D of the output is:
9379
9380`paddings(D, 0) + input.dim_size(D) + paddings(D, 1)`
9381
9382For example:
9383
9384```
9385# 't' is [[1, 1], [2, 2]]
9386# 'paddings' is [[1, 1], [2, 2]]
9387# 'constant_values' is 0
9388# rank of 't' is 2
pad(t, paddings, constant_values) ==> [[0, 0, 0, 0, 0, 0]
9390                      [0, 0, 1, 1, 0, 0]
9391                      [0, 0, 2, 2, 0, 0]
9392                      [0, 0, 0, 0, 0, 0]]
9393```
9394  }];
9395
9396  let arguments = (ins
9397    TF_Tensor:$input,
9398    TF_I32OrI64Tensor:$paddings,
9399    TF_Tensor:$constant_values
9400  );
9401
9402  let results = (outs
9403    TF_Tensor:$output
9404  );
9405
9406  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9407  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
9408}
9409
9410def TF_ParallelDynamicStitchOp : TF_Op<"ParallelDynamicStitch", [NoSideEffect, SameVariadicOperandSize]> {
9411  let summary = [{
9412Interleave the values from the `data` tensors into a single tensor.
9413  }];
9414
9415  let description = [{
9416Builds a merged tensor such that
9417
9418```python
9419    merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...]
9420```
9421
9422For example, if each `indices[m]` is scalar or vector, we have
9423
9424```python
9425    # Scalar indices:
9426    merged[indices[m], ...] = data[m][...]
9427
9428    # Vector indices:
9429    merged[indices[m][i], ...] = data[m][i, ...]
9430```
9431
9432Each `data[i].shape` must start with the corresponding `indices[i].shape`,
9433and the rest of `data[i].shape` must be constant w.r.t. `i`.  That is, we
9434must have `data[i].shape = indices[i].shape + constant`.  In terms of this
9435`constant`, the output shape is
9436
9437    merged.shape = [max(indices)] + constant
9438
9439Values may be merged in parallel, so if an index appears in both `indices[m][i]`
9440and `indices[n][j]`, the result may be invalid. This differs from the normal
9441DynamicStitch operator that defines the behavior in that case.
9442
9443For example:
9444
9445```python
9446    indices[0] = 6
9447    indices[1] = [4, 1]
9448    indices[2] = [[5, 2], [0, 3]]
9449    data[0] = [61, 62]
9450    data[1] = [[41, 42], [11, 12]]
9451    data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]]
9452    merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42],
9453              [51, 52], [61, 62]]
9454```
9455
9456This method can be used to merge partitions created by `dynamic_partition`
as illustrated in the following example:
9458
9459```python
    # Apply a function (increment x_i) to elements for which a certain
    # condition applies (x_i != -1 in this example).
    x = tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4])
    condition_mask = tf.not_equal(x, tf.constant(-1.))
    partitioned_data = tf.dynamic_partition(
        x, tf.cast(condition_mask, tf.int32), 2)
    partitioned_data[1] = partitioned_data[1] + 1.0
    condition_indices = tf.dynamic_partition(
        tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32), 2)
9469    x = tf.dynamic_stitch(condition_indices, partitioned_data)
9470    # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain
9471    # unchanged.
9472```
9473
9474<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
9475<img style="width:100%" src="https://www.tensorflow.org/images/DynamicStitch.png" alt>
9476</div>
9477  }];
9478
9479  let arguments = (ins
9480    Variadic<TF_Int32Tensor>:$indices,
9481    Variadic<TF_Tensor>:$data
9482  );
9483
9484  let results = (outs
9485    TF_Tensor:$merged
9486  );
9487
9488  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
9489  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
9490}
9491
9492def TF_ParameterizedTruncatedNormalOp : TF_Op<"ParameterizedTruncatedNormal", [TF_CannotDuplicate]> {
9493  let summary = [{
9494Outputs random values from a normal distribution. The parameters may each be a
9495  }];
9496
9497  let description = [{
9498scalar which applies to the entire output, or a vector of length shape[0] which
9499stores the parameters for each batch.
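
For example (an illustrative sketch via `tf.raw_ops.ParameterizedTruncatedNormal`;
the parameter values below are made up for illustration):

```python
import tensorflow as tf

# Two batches of five samples each; every batch gets its own parameters.
samples = tf.raw_ops.ParameterizedTruncatedNormal(
    shape=[2, 5],
    means=[0.0, 1.0], stdevs=[1.0, 0.5],
    minvals=[-1.0, 0.0], maxvals=[1.0, 2.0])
samples.shape  # ==> TensorShape([2, 5])
```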
9500  }];
9501
9502  let arguments = (ins
9503    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor. Batches are indexed by the 0th dimension.}]>:$shape,
9504    Arg<TF_FloatTensor, [{The mean parameter of each batch.}]>:$means,
9505    Arg<TF_FloatTensor, [{The standard deviation parameter of each batch. Must be greater than 0.}]>:$stdevs,
9506    Arg<TF_FloatTensor, [{The minimum cutoff. May be -infinity.}]>:$minvals,
9507    Arg<TF_FloatTensor, [{The maximum cutoff. May be +infinity, and must be more than the minval
9508for each batch.}]>:$maxvals,
9509
9510    DefaultValuedAttr<I64Attr, "0">:$seed,
9511    DefaultValuedAttr<I64Attr, "0">:$seed2
9512  );
9513
9514  let results = (outs
9515    Res<TF_FloatTensor, [{A matrix of shape num_batches x samples_per_batch, filled with random
9516truncated normal values using the parameters for each row.}]>:$output
9517  );
9518
9519  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9520  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
9521}
9522
9523def TF_PolygammaOp : TF_Op<"Polygamma", [NoSideEffect, ResultsBroadcastableShape]>,
9524                     WithBroadcastableBinOpBuilder {
9525  let summary = [{
9526Compute the polygamma function \\(\psi^{(n)}(x)\\).
9527  }];
9528
9529  let description = [{
9530The polygamma function is defined as:
9531
9533\\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\)
9534
9535where \\(\psi(x)\\) is the digamma function.
The polygamma function is defined only for non-negative integer orders \\(a\\).
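
For example (an illustrative sketch; `tf.math.polygamma` is assumed to lower
to this op, and \\(\psi^{(1)}(1) = \pi^2/6\\) is a standard identity):

```python
import tensorflow as tf

# The trigamma function evaluated at 1 equals pi**2 / 6.
tf.math.polygamma(a=tf.constant(1.0), x=tf.constant(1.0))  # ==> ~1.6449341
```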
9537  }];
9538
9539  let arguments = (ins
9540    TF_F32OrF64Tensor:$a,
9541    TF_F32OrF64Tensor:$x
9542  );
9543
9544  let results = (outs
9545    TF_F32OrF64Tensor:$z
9546  );
9547
9548  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9549}
9550
9551def TF_PopulationCountOp : TF_Op<"PopulationCount", [NoSideEffect, SameOperandsAndResultShape]> {
9552  let summary = [{
9553Computes element-wise population count (a.k.a. popcount, bitsum, bitcount).
9554  }];
9555
9556  let description = [{
9557For each entry in `x`, calculates the number of `1` (on) bits in the binary
9558representation of that entry.
9559
9560**NOTE**: It is more efficient to first `tf.bitcast` your tensors into
9561`int32` or `int64` and perform the bitcount on the result, than to feed in
95628- or 16-bit inputs and then aggregate the resulting counts.
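
For example (an illustrative sketch via the raw-op binding
`tf.raw_ops.PopulationCount`):

```python
import tensorflow as tf

x = tf.constant([0, 5, 255], dtype=tf.int32)
# 0 -> 0b0, 5 -> 0b101, 255 -> 0b11111111
tf.raw_ops.PopulationCount(x=x)  # ==> [0, 2, 8], dtype uint8
```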
9563  }];
9564
9565  let arguments = (ins
9566    TF_IntTensor:$x
9567  );
9568
9569  let results = (outs
9570    TF_Uint8Tensor:$y
9571  );
9572
9573  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9574}
9575
9576def TF_PowOp : TF_Op<"Pow", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
9577               WithBroadcastableBinOpBuilder {
9578  let summary = "Computes the power of one value to another.";
9579
9580  let description = [{
9581Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
9582corresponding elements in `x` and `y`. For example:
9583
9584```
# tensor 'x' is [[2, 2], [3, 3]]
9586# tensor 'y' is [[8, 16], [2, 3]]
9587tf.pow(x, y) ==> [[256, 65536], [9, 27]]
9588```
9589  }];
9590
9591  let arguments = (ins
9592    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x,
9593    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
9594  );
9595
9596  let results = (outs
9597    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$z
9598  );
9599
9600  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9601
9602  let hasFolder = 1;
9603}
9604
9605def TF_PreventGradientOp : TF_Op<"PreventGradient", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
9606  let summary = [{
9607An identity op that triggers an error if a gradient is requested.
9608  }];
9609
9610  let description = [{
9611When executed in a graph, this op outputs its input tensor as-is.
9612
9613When building ops to compute gradients, the TensorFlow gradient system
will return an error when trying to look up the gradient of this op,
9615because no gradient must ever be registered for this function.  This
9616op exists to prevent subtle bugs from silently returning unimplemented
9617gradients in some corner cases.
9618  }];
9619
9620  let arguments = (ins
9621    Arg<TF_Tensor, [{any tensor.}]>:$input,
9622
9623    StrAttr:$message
9624  );
9625
9626  let results = (outs
9627    Res<TF_Tensor, [{the same input tensor.}]>:$output
9628  );
9629
9630  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9631}
9632
9633def TF_PrintV2Op : TF_Op<"PrintV2", []> {
9634  let summary = "Prints a string scalar.";
9635
9636  let description = [{
9637Prints a string scalar to the desired output_stream.
9638  }];
9639
9640  let arguments = (ins
9641    Arg<TF_StrTensor, [{The string scalar to print.}]>:$input,
9642
9643    DefaultValuedAttr<StrAttr, "stderr">:$output_stream,
9644    DefaultValuedAttr<StrAttr, "\n">:$end
9645  );
9646
9647  let results = (outs);
9648}
9649
9650def TF_ProdOp : TF_Op<"Prod", [NoSideEffect]> {
9651  let summary = [{
9652Computes the product of elements across dimensions of a tensor.
9653  }];
9654
9655  let description = [{
9656Reduces `input` along the dimensions given in `axis`. Unless
9657`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
9658`axis`. If `keep_dims` is true, the reduced dimensions are
9659retained with length 1.
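
For example (an illustrative sketch; `tf.reduce_prod` is assumed to lower to
this op):

```python
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_prod(x, axis=0)                 # ==> [3., 8.]
tf.reduce_prod(x, axis=1, keepdims=True)  # ==> [[2.], [12.]]
```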
9660  }];
9661
9662  let arguments = (ins
9663    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
9664    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
9665`[-rank(input), rank(input))`.}]>:$reduction_indices,
9666
9667    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
9668  );
9669
9670  let results = (outs
9671    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
9672  );
9673
9674  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9675  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
9676}
9677
9678def TF_QrOp : TF_Op<"Qr", [NoSideEffect]> {
9679  let summary = "Computes the QR decompositions of one or more matrices.";
9680
9681  let description = [{
9682Computes the QR decomposition of each inner matrix in `tensor` such that
`tensor[..., :, :] = q[..., :, :] * r[..., :, :]`
9684
9685Currently, the gradient for the QR decomposition is well-defined only when
9686the first `P` columns of the inner matrix are linearly independent, where
`P` is the minimum of `M` and `N`, the 2 inner-most dimensions of `tensor`.
9688
9689```python
9690# a is a tensor.
9691# q is a tensor of orthonormal matrices.
9692# r is a tensor of upper triangular matrices.
9693q, r = qr(a)
9694q_full, r_full = qr(a, full_matrices=True)
9695```
9696  }];
9697
9698  let arguments = (ins
9699    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
9700form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.}]>:$input,
9701
9702    DefaultValuedAttr<BoolAttr, "false">:$full_matrices
9703  );
9704
9705  let results = (outs
9706    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Orthonormal basis for range of `a`. If `full_matrices` is `False` then
9707shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
9708`[..., M, M]`.}]>:$q,
9709    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Triangular factor. If `full_matrices` is `False` then shape is
9710`[..., P, N]`. If `full_matrices` is `True` then shape is `[..., M, N]`.}]>:$r
9711  );
9712
9713  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9714
9715  let verifier = [{
9716    return Verify(*this);
9717  }];
9718}
9719
9720def TF_QuantizeAndDequantizeOp : TF_Op<"QuantizeAndDequantize", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
9721  let summary = "Use QuantizeAndDequantizeV2 instead.";
9722
9723  let arguments = (ins
9724    TF_FloatTensor:$input,
9725
9726    DefaultValuedAttr<BoolAttr, "true">:$signed_input,
9727    DefaultValuedAttr<I64Attr, "8">:$num_bits,
9728    DefaultValuedAttr<BoolAttr, "false">:$range_given,
9729    DefaultValuedAttr<F32Attr, "0.0f">:$input_min,
9730    DefaultValuedAttr<F32Attr, "0.0f">:$input_max
9731  );
9732
9733  let results = (outs
9734    TF_FloatTensor:$output
9735  );
9736
9737  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9738}
9739
9740def TF_QuantizeAndDequantizeV2Op : TF_Op<"QuantizeAndDequantizeV2", [NoSideEffect]> {
9741  let summary = "Quantizes then dequantizes a tensor.";
9742
9743  let description = [{
9744This op simulates the precision loss from the quantized forward pass by:
9745
97461. Quantizing the tensor to fixed point numbers, which should match the target
9747   quantization method when it is used in inference.
97482. Dequantizing it back to floating point numbers for the following ops, most
9749   likely matmul.
9750
9751There are different ways to quantize. This version uses only scaling, so 0.0
9752maps to 0.
9753
9754From the specified 'num_bits' in the quantized output type, it determines
9755minimum and maximum representable quantized values.
9756
9757e.g.
9758
9759*   [-128, 127] for signed, num_bits = 8, or
9760*   [0, 255] for unsigned, num_bits = 8.
9761
9762If range_given == False, the initial input_min, input_max will be determined
9763automatically as the minimum and maximum values in the input tensor, otherwise
9764the specified values of input_min, input_max are used.
9765
9766Note: If the input_min, input_max are specified, they do not need to equal the
9767actual minimum and maximum values in the tensor. e.g. in some cases it may be
9768beneficial to specify these values such that the low probability extremes of the
9769input distribution are clipped.
9770
9771This op determines the maximum scale_factor that would map the initial
9772[input_min, input_max] range to a range that lies within the representable
9773quantized range.
9774
9775It determines the scale from one of input_min and input_max, then updates the
9776other one to maximize the representable range.
9777
9778e.g.
9779
9780*   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
    5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
    would update input_max to be 127 / 12.8 = 9.921875
*   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
    10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
    would update input_min to be -128.0 / 12.7 = -10.07874
9786*   if the output is unsigned, input_min is forced to be 0, and only the
9787    specified input_max is used.
9788
9789After determining the scale_factor and updating the input range, it applies the
9790following to each value in the 'input' tensor.
9791
9792output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.
9793
9794The above round function rounds the value based on the given round_mode.
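
A small worked example of the formula above, reusing the signed, num_bits = 8,
[-10.0, 10.0] case from the list (plain Python, for illustration only):

```python
# scale_factor = 127 / 10.0 = 12.7; a value of 1.0 quantizes to the nearest
# multiple of 1/12.7, illustrating the precision loss this op simulates.
scale_factor = 127 / 10.0
value = 1.0
output = round(min(max(value, -10.0), 10.0) * scale_factor) / scale_factor
# output ==> 1.0236220472440944 (not exactly 1.0)
```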
9795  }];
9796
9797  let arguments = (ins
9798    Arg<TF_FloatTensor, [{Tensor to quantize and then dequantize.}]>:$input,
9799    Arg<TF_FloatTensor, [{If `range_given == True`, this specifies the minimum input value that needs to
9800be represented, otherwise it is determined from the min value of the `input`
9801tensor.}]>:$input_min,
9802    Arg<TF_FloatTensor, [{If `range_given == True`, this specifies the maximum input value that needs to
9803be represented, otherwise it is determined from the max value of the `input`
9804tensor.}]>:$input_max,
9805
9806    DefaultValuedAttr<BoolAttr, "true">:$signed_input,
9807    DefaultValuedAttr<I64Attr, "8">:$num_bits,
9808    DefaultValuedAttr<BoolAttr, "false">:$range_given,
9809    DefaultValuedAttr<TF_AnyStrAttrOf<["HALF_TO_EVEN", "HALF_UP"]>, "HALF_TO_EVEN">:$round_mode,
9810    DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
9811    DefaultValuedAttr<I64Attr, "-1">:$axis
9812  );
9813
9814  let results = (outs
9815    TF_FloatTensor:$output
9816  );
9817
9818  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9819}
9820
9821def TF_QuantizeAndDequantizeV3Op : TF_Op<"QuantizeAndDequantizeV3", [NoSideEffect]> {
9822  let summary = "Quantizes then dequantizes a tensor.";
9823
9824  let description = [{
9825This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
9826tensor, so its value can change during training.
9827  }];
9828
9829  let arguments = (ins
9830    TF_FloatTensor:$input,
9831    TF_FloatTensor:$input_min,
9832    TF_FloatTensor:$input_max,
9833    TF_Int32Tensor:$num_bits,
9834
9835    DefaultValuedAttr<BoolAttr, "true">:$signed_input,
9836    DefaultValuedAttr<BoolAttr, "true">:$range_given,
9837    DefaultValuedAttr<BoolAttr, "false">:$narrow_range,
9838    DefaultValuedAttr<I64Attr, "-1">:$axis
9839  );
9840
9841  let results = (outs
9842    TF_FloatTensor:$output
9843  );
9844
9845  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
9846}
9847
9848def TF_QueueDequeueV2Op : TF_Op<"QueueDequeueV2", []> {
9849  let summary = "Dequeues a tuple of one or more tensors from the given queue.";
9850
9851  let description = [{
9852This operation has k outputs, where k is the number of components
9853in the tuples stored in the given queue, and output i is the ith
9854component of the dequeued tuple.
9855
9856N.B. If the queue is empty, this operation will block until an element
9857has been dequeued (or 'timeout_ms' elapses, if specified).
9858  }];
9859
9860  let arguments = (ins
9861    Arg<TF_ResourceTensor, [{The handle to a queue.}]>:$handle,
9862
9863    DefaultValuedAttr<I64Attr, "-1">:$timeout_ms
9864  );
9865
9866  let results = (outs
9867    Res<Variadic<TF_Tensor>, [{One or more tensors that were dequeued as a tuple.}]>:$components
9868  );
9869
9870  TF_DerivedResultTypeListAttr component_types = TF_DerivedResultTypeListAttr<0>;
9871}
9872
9873def TF_RFFTOp : TF_Op<"RFFT", [NoSideEffect]> {
9874  let summary = "Real-valued fast Fourier transform.";
9875
9876  let description = [{
9877Computes the 1-dimensional discrete Fourier transform of a real-valued signal
9878over the inner-most dimension of `input`.
9879
9880Since the DFT of a real signal is Hermitian-symmetric, `RFFT` only returns the
9881`fft_length / 2 + 1` unique components of the FFT: the zero-frequency term,
9882followed by the `fft_length / 2` positive-frequency terms.
9883
9884Along the axis `RFFT` is computed on, if `fft_length` is smaller than the
9885corresponding dimension of `input`, the dimension is cropped. If it is larger,
9886the dimension is padded with zeros.
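
For example (an illustrative sketch; `tf.signal.rfft` is assumed to lower to
this op):

```python
import tensorflow as tf

# A unit impulse at index 1 of a length-4 signal: the DFT is e^{-2 pi i k / 4},
# and only the fft_length / 2 + 1 = 3 unique components are returned.
tf.signal.rfft([0., 1., 0., 0.])  # ==> [1.+0.j, 0.-1.j, -1.+0.j]
```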
9887  }];
9888
9889  let arguments = (ins
9890    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
9891    Arg<TF_Int32Tensor, [{An int32 tensor of shape [1]. The FFT length.}]>:$fft_length
9892  );
9893
9894  let results = (outs
9895    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most
9896  dimension of `input` is replaced with the `fft_length / 2 + 1` unique
9897  frequency components of its 1D Fourier transform.
9898
9899@compatibility(numpy)
9900Equivalent to np.fft.rfft
9901@end_compatibility}]>:$output
9902  );
9903
9904  TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
9905  TF_DerivedResultTypeAttr Tcomplex = TF_DerivedResultTypeAttr<0>;
9906}
9907
9908def TF_RFFT2DOp : TF_Op<"RFFT2D", [NoSideEffect]> {
9909  let summary = "2D real-valued fast Fourier transform.";
9910
9911  let description = [{
9912Computes the 2-dimensional discrete Fourier transform of a real-valued signal
9913over the inner-most 2 dimensions of `input`.
9914
9915Since the DFT of a real signal is Hermitian-symmetric, `RFFT2D` only returns the
9916`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
9917of `output`: the zero-frequency term, followed by the `fft_length / 2`
9918positive-frequency terms.
9919
9920Along each axis `RFFT2D` is computed on, if `fft_length` is smaller than the
9921corresponding dimension of `input`, the dimension is cropped. If it is larger,
9922the dimension is padded with zeros.
9923  }];
9924
9925  let arguments = (ins
9926    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
9927    Arg<TF_Int32Tensor, [{An int32 tensor of shape [2]. The FFT length for each dimension.}]>:$fft_length
9928  );
9929
9930  let results = (outs
9931    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most 2
9932  dimensions of `input` are replaced with their 2D Fourier transform. The
9933  inner-most dimension contains `fft_length / 2 + 1` unique frequency
9934  components.
9935
9936@compatibility(numpy)
9937Equivalent to np.fft.rfft2
9938@end_compatibility}]>:$output
9939  );
9940
9941  TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
9942  TF_DerivedResultTypeAttr Tcomplex = TF_DerivedResultTypeAttr<0>;
9943}
9944
9945def TF_RFFT3DOp : TF_Op<"RFFT3D", [NoSideEffect]> {
9946  let summary = "3D real-valued fast Fourier transform.";
9947
9948  let description = [{
9949Computes the 3-dimensional discrete Fourier transform of a real-valued signal
9950over the inner-most 3 dimensions of `input`.
9951
9952Since the DFT of a real signal is Hermitian-symmetric, `RFFT3D` only returns the
9953`fft_length / 2 + 1` unique components of the FFT for the inner-most dimension
9954of `output`: the zero-frequency term, followed by the `fft_length / 2`
9955positive-frequency terms.
9956
9957Along each axis `RFFT3D` is computed on, if `fft_length` is smaller than the
9958corresponding dimension of `input`, the dimension is cropped. If it is larger,
9959the dimension is padded with zeros.
9960  }];
9961
9962  let arguments = (ins
9963    Arg<TF_F32OrF64Tensor, [{A float32 tensor.}]>:$input,
9964    Arg<TF_Int32Tensor, [{An int32 tensor of shape [3]. The FFT length for each dimension.}]>:$fft_length
9965  );
9966
9967  let results = (outs
9968    Res<TensorOf<[TF_Complex128, TF_Complex64]>, [{A complex64 tensor of the same rank as `input`. The inner-most 3
  dimensions of `input` are replaced with their 3D Fourier transform. The
9970  inner-most dimension contains `fft_length / 2 + 1` unique frequency
9971  components.
9972
9973@compatibility(numpy)
9974Equivalent to np.fft.rfftn with 3 dimensions.
9975@end_compatibility}]>:$output
9976  );
9977
9978  TF_DerivedOperandTypeAttr Treal = TF_DerivedOperandTypeAttr<0>;
9979  TF_DerivedResultTypeAttr Tcomplex = TF_DerivedResultTypeAttr<0>;
9980}
9981
9982def TF_RGBToHSVOp : TF_Op<"RGBToHSV", [NoSideEffect]> {
9983  let summary = "Converts one or more images from RGB to HSV.";
9984
9985  let description = [{
9986Outputs a tensor of the same shape as the `images` tensor, containing the HSV
value of the pixels. The output is only well defined if the values in `images`
are in `[0,1]`.
9989
9990`output[..., 0]` contains hue, `output[..., 1]` contains saturation, and
9991`output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0
9992corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
9993
9994Usage Example:
9995
9996>>> blue_image = tf.stack([
9997...    tf.zeros([5,5]),
9998...    tf.zeros([5,5]),
9999...    tf.ones([5,5])],
10000...    axis=-1)
10001>>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image)
10002>>> blue_hsv_image[0,0].numpy()
10003array([0.6666667, 1. , 1. ], dtype=float32)
10004  }];
10005
10006  let arguments = (ins
10007    Arg<TF_FloatTensor, [{1-D or higher rank. RGB data to convert. Last dimension must be size 3.}]>:$images
10008  );
10009
10010  let results = (outs
10011    Res<TF_FloatTensor, [{`images` converted to HSV.}]>:$output
10012  );
10013
10014  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10015}
10016
10017def TF_RaggedGatherOp : TF_Op<"RaggedGather", [NoSideEffect]> {
10018  let summary = [{
10019Gather ragged slices from `params` axis `0` according to `indices`.
10020  }];
10021
10022  let description = [{
10023Outputs a `RaggedTensor` output composed from `output_dense_values` and
10024`output_nested_splits`, such that:
10025
10026```python
10027output.shape = indices.shape + params.shape[1:]
10028output.ragged_rank = indices.shape.ndims + params.ragged_rank
10029output[i...j, d0...dn] = params[indices[i...j], d0...dn]
10030```
10031
10032where
10033
10034* `params =
10035   ragged.from_nested_row_splits(params_dense_values, params_nested_splits)`
10036   provides the values that should be gathered.
* `indices` is a dense tensor with dtype `int32` or `int64`, indicating which
10038   values should be gathered.
10039* `output =
10040   ragged.from_nested_row_splits(output_dense_values, output_nested_splits)`
10041   is the output tensor.
10042
(Note: This C++ op is used to implement the higher-level Python
10044`tf.ragged.gather` op, which also supports ragged indices.)
10045  }];
10046
10047  let arguments = (ins
10048    Arg<Variadic<TF_I32OrI64Tensor>, [{The `nested_row_splits` tensors that define the row-partitioning for the
10049`params` RaggedTensor input.}]>:$params_nested_splits,
10050    Arg<TF_Tensor, [{The `flat_values` for the `params` RaggedTensor. There was a terminology change
10051at the python level from dense_values to flat_values, so dense_values is the
10052deprecated name.}]>:$params_dense_values,
10053    Arg<TF_I32OrI64Tensor, [{Indices in the outermost dimension of `params` of the values that should be
10054gathered.}]>:$indices
10055  );
10056
10057  let results = (outs
10058    Res<Variadic<TF_I32OrI64Tensor>, [{The `nested_row_splits` tensors that define the row-partitioning for the
10059returned RaggedTensor.}]>:$output_nested_splits,
10060    Res<TF_Tensor, [{The `flat_values` for the returned RaggedTensor.}]>:$output_dense_values
10061  );
10062
10063  TF_DerivedOperandTypeAttr Tsplits = TF_DerivedOperandTypeAttr<0>;
10064  TF_DerivedOperandTypeAttr Tvalues = TF_DerivedOperandTypeAttr<1>;
10065  TF_DerivedResultSizeAttr OUTPUT_RAGGED_RANK = TF_DerivedResultSizeAttr<0>;
10066  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
10067  TF_DerivedOperandSizeAttr PARAMS_RAGGED_RANK = TF_DerivedOperandSizeAttr<0>;
10068}
10069
10070def TF_RaggedRangeOp : TF_Op<"RaggedRange", [NoSideEffect]> {
10071  let summary = [{
10072Returns a `RaggedTensor` containing the specified sequences of numbers.
10073  }];
10074
10075  let description = [{
10076Returns a `RaggedTensor` `result` composed from `rt_dense_values` and
10077`rt_nested_splits`, such that
10078`result[i] = range(starts[i], limits[i], deltas[i])`.
10079
10080```python
10081(rt_nested_splits, rt_dense_values) = ragged_range(
10082      starts=[2, 5, 8], limits=[3, 5, 12], deltas=1)
10083result = tf.ragged.from_row_splits(rt_dense_values, rt_nested_splits)
10084print(result)
<tf.RaggedTensor [[2], [], [8, 9, 10, 11]]>
10086```
10087
10088The input tensors `starts`, `limits`, and `deltas` may be scalars or vectors.
10089The vector inputs must all have the same size.  Scalar inputs are broadcast
10090to match the size of the vector inputs.
10091  }];
10092
10093  let arguments = (ins
10094    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The starts of each range.}]>:$starts,
10095    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The limits of each range.}]>:$limits,
10096    Arg<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The deltas of each range.}]>:$deltas
10097  );
10098
10099  let results = (outs
10100    Res<TF_I32OrI64Tensor, [{The `row_splits` for the returned `RaggedTensor`.}]>:$rt_nested_splits,
10101    Res<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The `flat_values` for the returned `RaggedTensor`.}]>:$rt_dense_values
10102  );
10103
10104  TF_DerivedResultTypeAttr Tsplits = TF_DerivedResultTypeAttr<0>;
10105  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10106}
10107
10108def TF_RandomGammaOp : TF_Op<"RandomGamma", [TF_CannotDuplicate]> {
10109  let summary = [{
10110Outputs random values from the Gamma distribution(s) described by alpha.
10111  }];
10112
10113  let description = [{
10114This op uses the algorithm by Marsaglia et al. to acquire samples via
10115transformation-rejection from pairs of uniform and normal random variables.
10116See http://dl.acm.org/citation.cfm?id=358414
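
For example (an illustrative sketch; `tf.random.gamma` is assumed to lower to
this op):

```python
import tensorflow as tf

# Draw 10 samples from each of two Gamma distributions (alpha 0.5 and 1.5).
samples = tf.random.gamma(shape=[10], alpha=[0.5, 1.5])
samples.shape  # ==> TensorShape([10, 2]), i.e. shape + shape(alpha)
```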
10117  }];
10118
10119  let arguments = (ins
10120    Arg<TF_I32OrI64Tensor, [{1-D integer tensor. Shape of independent samples to draw from each
10121distribution described by the shape parameters given in alpha.}]>:$shape,
10122    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{A tensor in which each scalar is a "shape" parameter describing the
10123associated gamma distribution.}]>:$alpha,
10124
10125    DefaultValuedAttr<I64Attr, "0">:$seed,
10126    DefaultValuedAttr<I64Attr, "0">:$seed2
10127  );
10128
10129  let results = (outs
10130    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{A tensor with shape `shape + shape(alpha)`. Each slice
10131`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
10132`alpha[i0, i1, ...iN]`. The dtype of the output matches the dtype of alpha.}]>:$output
10133  );
10134
10135  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
10136  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
10137}
10138
10139def TF_RandomGammaGradOp : TF_Op<"RandomGammaGrad", [NoSideEffect, ResultsBroadcastableShape]>,
10140                           WithBroadcastableBinOpBuilder {
10141  let summary = [{
10142Computes the derivative of a Gamma random sample w.r.t. `alpha`.
10143  }];
10144
10145  let arguments = (ins
10146    TF_F32OrF64Tensor:$alpha,
10147    TF_F32OrF64Tensor:$sample
10148  );
10149
10150  let results = (outs
10151    TF_F32OrF64Tensor:$output
10152  );
10153
10154  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10155}
10156
10157def TF_RandomPoissonOp : TF_Op<"RandomPoisson", [TF_CannotDuplicate]> {
10158  let summary = "Use RandomPoissonV2 instead.";
10159
10160  let arguments = (ins
10161    TF_I32OrI64Tensor:$shape,
10162    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$rate,
10163
10164    DefaultValuedAttr<I64Attr, "0">:$seed,
10165    DefaultValuedAttr<I64Attr, "0">:$seed2
10166  );
10167
10168  let results = (outs
10169    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$output
10170  );
10171
10172  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
10173  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<1>;
10174}
10175
10176def TF_RandomPoissonV2Op : TF_Op<"RandomPoissonV2", [TF_CannotDuplicate]> {
10177  let summary = [{
10178Outputs random values from the Poisson distribution(s) described by rate.
10179  }];
10180
10181  let description = [{
10182This op uses two algorithms, depending on rate. If rate >= 10, then
10183the algorithm by Hormann is used to acquire samples via
10184transformation-rejection.
10185See http://www.sciencedirect.com/science/article/pii/0167668793909974.
10186
10187Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform
10188random variables.
10189See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer
10190Programming, Volume 2. Addison Wesley
10191  }];
10192
10193  let arguments = (ins
10194    Arg<TF_I32OrI64Tensor, [{1-D integer tensor. Shape of independent samples to draw from each
10195distribution described by the shape parameters given in rate.}]>:$shape,
10196    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A tensor in which each scalar is a "rate" parameter describing the
10197associated poisson distribution.}]>:$rate,
10198
10199    DefaultValuedAttr<I64Attr, "0">:$seed,
10200    DefaultValuedAttr<I64Attr, "0">:$seed2
10201  );
10202
10203  let results = (outs
10204    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{A tensor with shape `shape + shape(rate)`. Each slice
10205`[:, ..., :, i0, i1, ...iN]` contains the samples drawn for
10206`rate[i0, i1, ...iN]`.}]>:$output
10207  );
10208
10209  TF_DerivedOperandTypeAttr R = TF_DerivedOperandTypeAttr<1>;
10210  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
10211  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
10212}
10213
10214def TF_RandomShuffleOp : TF_Op<"RandomShuffle", [TF_CannotDuplicate, TF_SameOperandsAndResultTypeResolveRef]> {
10215  let summary = "Randomly shuffles a tensor along its first dimension.";
10216
10217  let description = [{
The tensor is shuffled along dimension 0, such that each `value[j]` is mapped
to one and only one `output[i]`. For example, a mapping that might occur for a
3x2 tensor is:
10221
10222```
10223[[1, 2],       [[5, 6],
10224 [3, 4],  ==>   [1, 2],
10225 [5, 6]]        [3, 4]]
10226```
10227  }];
10228
10229  let arguments = (ins
10230    Arg<TF_Tensor, [{The tensor to be shuffled.}]>:$value,
10231
10232    DefaultValuedAttr<I64Attr, "0">:$seed,
10233    DefaultValuedAttr<I64Attr, "0">:$seed2
10234  );
10235
10236  let results = (outs
10237    Res<TF_Tensor, [{A tensor of same shape and type as `value`, shuffled along its first
10238dimension.}]>:$output
10239  );
10240
10241  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10242}
10243
10244def TF_RandomStandardNormalOp : TF_Op<"RandomStandardNormal", [TF_CannotDuplicate]> {
10245  let summary = "Outputs random values from a normal distribution.";
10246
10247  let description = [{
10248The generated values will have mean 0 and standard deviation 1.
10249  }];
10250
10251  let arguments = (ins
10252    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
10253
10254    DefaultValuedAttr<I64Attr, "0">:$seed,
10255    DefaultValuedAttr<I64Attr, "0">:$seed2
10256  );
10257
10258  let results = (outs
10259    Res<TF_FloatTensor, [{A tensor of the specified shape filled with random normal values.}]>:$output
10260  );
10261
10262  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10263  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
10264}
10265
10266def TF_RandomUniformOp : TF_Op<"RandomUniform", [TF_CannotDuplicate]> {
10267  let summary = "Outputs random values from a uniform distribution.";
10268
10269  let description = [{
10270The generated values follow a uniform distribution in the range `[0, 1)`. The
10271lower bound 0 is included in the range, while the upper bound 1 is excluded.
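
For example (an illustrative sketch; `tf.random.uniform` with default
arguments is assumed to lower to this op):

```python
import tensorflow as tf

# Four float32 values, each drawn uniformly from [0, 1).
tf.random.uniform([2, 2])
```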
10272  }];
10273
10274  let arguments = (ins
10275    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
10276
10277    DefaultValuedAttr<I64Attr, "0">:$seed,
10278    DefaultValuedAttr<I64Attr, "0">:$seed2
10279  );
10280
10281  let results = (outs
10282    Res<TF_FloatTensor, [{A tensor of the specified shape filled with uniform random values.}]>:$output
10283  );
10284
10285  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10286  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
10287
10288  let verifier = [{
10289    return Verify(*this);
10290  }];
10291}
10292
10293def TF_RandomUniformIntOp : TF_Op<"RandomUniformInt", [TF_CannotDuplicate]> {
10294  let summary = "Outputs random integers from a uniform distribution.";
10295
10296  let description = [{
10297The generated values are uniform integers in the range `[minval, maxval)`.
10298The lower bound `minval` is included in the range, while the upper bound
10299`maxval` is excluded.
10300
10301The random integers are slightly biased unless `maxval - minval` is an exact
10302power of two.  The bias is small for values of `maxval - minval` significantly
10303smaller than the range of the output (either `2^32` or `2^64`).
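
A brief sketch of where the bias comes from, assuming the kernel reduces a
uniform random word modulo the range (illustrative numbers only):

```python
# With a 32-bit word and a range of 3, the residue 0 occurs once more often
# than 1 or 2, because 2**32 is not a multiple of 3.
range_ = 3
words = 2**32
counts = [words // range_ + (1 if r < words % range_ else 0)
          for r in range(range_)]
# counts ==> [1431655766, 1431655765, 1431655765]
```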
10304  }];
10305
10306  let arguments = (ins
10307    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
10308    Arg<TF_I32OrI64Tensor, [{0-D.  Inclusive lower bound on the generated integers.}]>:$minval,
10309    Arg<TF_I32OrI64Tensor, [{0-D.  Exclusive upper bound on the generated integers.}]>:$maxval,
10310
10311    DefaultValuedAttr<I64Attr, "0">:$seed,
10312    DefaultValuedAttr<I64Attr, "0">:$seed2
10313  );
10314
10315  let results = (outs
10316    Res<TF_I32OrI64Tensor, [{A tensor of the specified shape filled with uniform random integers.}]>:$output
10317  );
10318
10319  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10320  TF_DerivedOperandTypeAttr Tout = TF_DerivedOperandTypeAttr<1>;
10321}
10322
10323def TF_RangeOp : TF_Op<"Range", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
10324  let summary = "Creates a sequence of numbers.";
10325
10326  let description = [{
10327This operation creates a sequence of numbers that begins at `start` and
10328extends by increments of `delta` up to but not including `limit`.
10329
10330For example:
10331
10332```
10333# 'start' is 3
10334# 'limit' is 18
10335# 'delta' is 3
10336tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
10337```
10338  }];
10339
10340  let arguments = (ins
10341    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{0-D (scalar). First entry in the sequence.}]>:$start,
10342    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{0-D (scalar). Upper limit of sequence, exclusive.}]>:$limit,
10343    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{0-D (scalar). Optional. Default is 1. Number that increments `start`.}]>:$delta
10344  );
10345
10346  let results = (outs
10347    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>, [{1-D.}]>:$output
10348  );
10349
10350  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<0>;
10351
10352  let builders = [
10353    OpBuilderDAG<(ins "Value":$start, "Value":$limit, "Value":$delta)>
10354  ];
10355}
10356
10357def TF_RangeDatasetOp : TF_Op<"RangeDataset", []> {
10358  let summary = [{
Creates a dataset with a range of values. Corresponds to Python's `xrange`.
10360  }];
10361
10362  let arguments = (ins
    Arg<TF_Int64Tensor, [{corresponds to start in Python's `xrange()`.}]>:$start,
    Arg<TF_Int64Tensor, [{corresponds to stop in Python's `xrange()`.}]>:$stop,
    Arg<TF_Int64Tensor, [{corresponds to step in Python's `xrange()`.}]>:$step,
10366
10367    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
10368    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
10369  );
10370
10371  let results = (outs
10372    TF_VariantTensor:$handle
10373  );
10374}
10375
10376def TF_RankOp : TF_Op<"Rank", [NoSideEffect]> {
10377  let summary = "Returns the rank of a tensor.";
10378
10379  let description = [{
10380This operation returns an integer representing the rank of `input`.
10381
10382For example:
10383
10384```
10385# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
10386# shape of tensor 't' is [2, 2, 3]
10387rank(t) ==> 3
10388```
10389
10390**Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
10391of a tensor is the number of indices required to uniquely select each element
10392of the tensor. Rank is also known as "order", "degree", or "ndims."
10393  }];
10394
10395  let arguments = (ins
10396    TF_Tensor:$input
10397  );
10398
10399  let results = (outs
10400    TF_Int32Tensor:$output
10401  );
10402
10403  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10404
10405  let builders = [
10406    OpBuilderDAG<(ins "Value":$input)>
10407  ];
10408
10409  let hasFolder = 1;
10410}
10411
10412def TF_ReadVariableOp : TF_Op<"ReadVariableOp", []> {
10413  let summary = "Reads the value of a variable.";
10414
10415  let description = [{
10416The tensor returned by this operation is immutable.
10417
10418The value returned by this operation is guaranteed to be influenced by all the
10419writes on which this operation depends directly or indirectly, and to not be
10420influenced by any of the writes which depend directly or indirectly on this
10421operation.
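
A minimal sketch of this guarantee through the public `tf.Variable` API,
which issues a ReadVariableOp for the read:

```python
import tensorflow as tf

v = tf.Variable(1.0)

@tf.function
def bump_and_read():
  v.assign_add(1.0)      # a write this read depends on...
  return v.read_value()  # ...so the read is guaranteed to observe it

print(bump_and_read().numpy())  # 2.0
```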
10422  }];
10423
10424  let arguments = (ins
10425    Arg<TF_ResourceTensor, [{handle to the resource in which to store the variable.}], [TF_VariableRead]>:$resource
10426  );
10427
10428  let results = (outs
10429    TF_Tensor:$value
10430  );
10431
10432  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
10433
10434  let hasCanonicalizer = 1;
10435}
10436
10437def TF_RealOp : TF_Op<"Real", [NoSideEffect, SameOperandsAndResultShape]> {
10438  let summary = "Returns the real part of a complex number.";
10439
10440  let description = [{
10441Given a tensor `input` of complex numbers, this operation returns a tensor of
10442type `float` that is the real part of each element in `input`. All elements in
`input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
part returned by this operation and *b* is the imaginary part.
10445
10446For example:
10447
10448```
10449# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
10450tf.real(input) ==> [-2.25, 3.25]
10451```
10452  }];
10453
10454  let arguments = (ins
10455    TensorOf<[TF_Complex128, TF_Complex64]>:$input
10456  );
10457
10458  let results = (outs
10459    TF_F32OrF64Tensor:$output
10460  );
10461
10462  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10463  TF_DerivedResultTypeAttr Tout = TF_DerivedResultTypeAttr<0>;
10464}
10465
10466def TF_ReciprocalOp : TF_Op<"Reciprocal", [Involution, NoSideEffect, SameOperandsAndResultType]> {
10467  let summary = "Computes the reciprocal of x element-wise.";
10468
10469  let description = [{
10470I.e., \\(y = 1 / x\\).
10471  }];
10472
10473  let arguments = (ins
10474    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
10475  );
10476
10477  let results = (outs
10478    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
10479  );
10480
10481  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10482}
10483
10484def TF_ReciprocalGradOp : TF_Op<"ReciprocalGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
10485  let summary = "Computes the gradient for the inverse of `x` wrt its input.";
10486
10487  let description = [{
Specifically, `grad = -dy * y * y`, where `y = 1/x`, and `dy`
10489is the corresponding input gradient.
10490  }];
10491
10492  let arguments = (ins
10493    TF_FpOrComplexTensor:$y,
10494    TF_FpOrComplexTensor:$dy
10495  );
10496
10497  let results = (outs
10498    TF_FpOrComplexTensor:$z
10499  );
10500
10501  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10502}
10503
10504def TF_RecvOp : TF_Op<"Recv", []> {
10505  let summary = "Receives the named tensor from send_device on recv_device.";
10506
10507  let arguments = (ins
10508    StrAttr:$tensor_name,
10509    StrAttr:$send_device,
10510    I64Attr:$send_device_incarnation,
10511    StrAttr:$recv_device,
10512    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
10513  );
10514
10515  let results = (outs
10516    Res<TF_Tensor, [{The tensor to receive.}]>:$tensor
10517  );
10518
10519  TF_DerivedResultTypeAttr tensor_type = TF_DerivedResultTypeAttr<0>;
10520}
10521
10522def TF_RecvTPUEmbeddingActivationsOp : TF_Op<"RecvTPUEmbeddingActivations", [TF_TPUEmbeddingSideEffect]> {
10523  let summary = "An op that receives embedding activations on the TPU.";
10524
10525  let description = [{
10526The TPU system performs the embedding lookups and aggregations specified by
10527the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The
results of these aggregations are visible to the TensorFlow graph as the
10529outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing
10530one Tensor of activations per table specified in the model. There can be at
10531most one RecvTPUEmbeddingActivations op in the TPU graph.
10532  }];
10533
10534  let arguments = (ins
10535    StrAttr:$config
10536  );
10537
10538  let results = (outs
10539    Res<Variadic<TF_Float32Tensor>, [{A TensorList of embedding activations containing one Tensor per
10540embedding table in the model.}]>:$outputs
10541  );
10542
10543  TF_DerivedResultSizeAttr num_outputs = TF_DerivedResultSizeAttr<0>;
10544}
10545
10546def TF_ReduceJoinOp : TF_Op<"ReduceJoin", [NoSideEffect]> {
10547  let summary = "Joins a string Tensor across the given dimensions.";
10548
10549  let description = [{
10550Computes the string join across dimensions in the given string Tensor of shape
10551`[\\(d_0, d_1, ..., d_{n-1}\\)]`.  Returns a new Tensor created by joining the input
10552strings with the given separator (default: empty string).  Negative indices are
10553counted backwards from the end, with `-1` being equivalent to `n - 1`.  If
10554indices are not specified, joins across all dimensions beginning from `n - 1`
10555through `0`.
10556
10557For example:
10558
10559```python
10560# tensor `a` is [["a", "b"], ["c", "d"]]
10561tf.reduce_join(a, 0) ==> ["ac", "bd"]
10562tf.reduce_join(a, 1) ==> ["ab", "cd"]
10563tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"]
10564tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"]
10565tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]]
10566tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]]
10567tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"]
10568tf.reduce_join(a, [0, 1]) ==> "acbd"
10569tf.reduce_join(a, [1, 0]) ==> "abcd"
10570tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]]
10571tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd"
10572```
10573  }];
10574
10575  let arguments = (ins
10576    Arg<TF_StrTensor, [{The input to be joined.  All reduced indices must have non-zero size.}]>:$inputs,
10577    Arg<TF_Int32Tensor, [{The dimensions to reduce over.  Dimensions are reduced in the
10578order specified.  Omitting `reduction_indices` is equivalent to passing
10579`[n-1, n-2, ..., 0]`.  Negative indices from `-n` to `-1` are supported.}]>:$reduction_indices,
10580
10581    DefaultValuedAttr<BoolAttr, "false">:$keep_dims,
10582    StrAttr:$separator
10583  );
10584
10585  let results = (outs
10586    Res<TF_StrTensor, [{Has shape equal to that of the input with reduced dimensions removed or
10587set to `1` depending on `keep_dims`.}]>:$output
10588  );
10589}
10590
10591def TF_ReluOp : TF_Op<"Relu", [Idempotent, NoSideEffect, SameOperandsAndResultType, TF_ContractionFusableInterface, TF_LayoutAgnostic]> {
10592  let summary = "Computes rectified linear: `max(features, 0)`.";
10593
10594  let description = [{
See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)

Example usage:

>>> tf.nn.relu([-2., 0., -0., 3.]).numpy()
array([ 0.,  0., -0.,  3.], dtype=float32)
10599  }];
10600
10601  let arguments = (ins
10602    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$features
10603  );
10604
10605  let results = (outs
10606    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$activations
10607  );
10608
10609  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10610
10611  let extraClassDeclaration = [{
10612    // TF_ContractionFusableInterface:
10613    Optional<ContractionFusion> GetContractionFusion();
10614  }];
10615}
10616
10617def TF_Relu6Op : TF_Op<"Relu6", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
10618  let summary = "Computes rectified linear 6: `min(max(features, 0), 6)`.";
10619
10620  let arguments = (ins
10621    TF_IntOrFpTensor:$features
10622  );
10623
10624  let results = (outs
10625    TF_IntOrFpTensor:$activations
10626  );
10627
10628  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10629}
10630
10631def TF_Relu6GradOp : TF_Op<"Relu6Grad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
10632  let summary = "Computes rectified linear 6 gradients for a Relu6 operation.";
10633
10634  let arguments = (ins
10635    Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu6 operation.}]>:$gradients,
10636    Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu6 operation, or
10637its output; using either one produces the same result.}]>:$features
10638  );
10639
10640  let results = (outs
10641    Res<TF_IntOrFpTensor, [{The gradients:
10642`gradients * (features > 0) * (features < 6)`.}]>:$backprops
10643  );
10644
10645  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10646}
10647
10648def TF_ReluGradOp : TF_Op<"ReluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
10649  let summary = "Computes rectified linear gradients for a Relu operation.";
10650
10651  let arguments = (ins
10652    Arg<TF_IntOrFpTensor, [{The backpropagated gradients to the corresponding Relu operation.}]>:$gradients,
10653    Arg<TF_IntOrFpTensor, [{The features passed as input to the corresponding Relu operation, OR
10654the outputs of that operation (both work equivalently).}]>:$features
10655  );
10656
10657  let results = (outs
10658    Res<TF_IntOrFpTensor, [{`gradients * (features > 0)`.}]>:$backprops
10659  );
10660
10661  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10662}
10663
10664def TF_RemoteCallOp : TF_Op<"RemoteCall", []> {
10665  let summary = "Runs function `f` on a remote device indicated by `target`.";
10666
10667  let arguments = (ins
10668    Arg<TF_StrTensor, [{A fully specified device name where we want to run the function.}]>:$target,
10669    Arg<Variadic<TF_Tensor>, [{A list of arguments for the function.}]>:$args,
10670
10671    SymbolRefAttr:$f
10672  );
10673
10674  let results = (outs
10675    Res<Variadic<TF_Tensor>, [{A list of return values.}]>:$output
10676  );
10677
10678  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<1>;
10679  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
10680}
10681
10682def TF_ReshapeOp : TF_Op<"Reshape", [NoSideEffect]> {
10683  let summary = "Reshapes a tensor.";
10684
10685  let description = [{
10686Given `tensor`, this operation returns a tensor that has the same values
10687as `tensor` with shape `shape`.
10688
10689If one component of 1-D tensor `shape` is the special value -1, the size of that
10690dimension is computed so that the total size remains constant.  In particular, a
10691`shape` of `[-1]` flattens into 1-D.  At most one component of `shape` may be
10692unknown.
10693
The `shape` argument must be 1-D; anything else is an error. The operation
returns a tensor with shape `shape` filled with the values of `tensor`, so
the number of elements implied by `shape` must equal the number of elements
in `tensor`.
10699
10700For example:
10701
10702```
10703# tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9]
10704# tensor 't' has shape [9]
10705reshape(t, [3, 3]) ==> [[1, 2, 3],
10706                        [4, 5, 6],
10707                        [7, 8, 9]]
10708
10709# tensor 't' is [[[1, 1], [2, 2]],
10710#                [[3, 3], [4, 4]]]
10711# tensor 't' has shape [2, 2, 2]
10712reshape(t, [2, 4]) ==> [[1, 1, 2, 2],
10713                        [3, 3, 4, 4]]
10714
10715# tensor 't' is [[[1, 1, 1],
10716#                 [2, 2, 2]],
10717#                [[3, 3, 3],
10718#                 [4, 4, 4]],
10719#                [[5, 5, 5],
10720#                 [6, 6, 6]]]
10721# tensor 't' has shape [3, 2, 3]
10722# pass '[-1]' to flatten 't'
10723reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6]
10724
10725# -1 can also be used to infer the shape
10726
10727# -1 is inferred to be 9:
10728reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
10729                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
10730# -1 is inferred to be 2:
10731reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3],
10732                         [4, 4, 4, 5, 5, 5, 6, 6, 6]]
10733# -1 is inferred to be 3:
10734reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1],
10735                              [2, 2, 2],
10736                              [3, 3, 3]],
10737                             [[4, 4, 4],
10738                              [5, 5, 5],
10739                              [6, 6, 6]]]
10740
10741# tensor 't' is [7]
10742# shape `[]` reshapes to a scalar
10743reshape(t, []) ==> 7
10744```
10745  }];
10746
10747  let arguments = (ins
10748    TF_Tensor:$tensor,
10749    Arg<TF_I32OrI64Tensor, [{Defines the shape of the output tensor.}]>:$shape
10750  );
10751
10752  let results = (outs
10753    TF_Tensor:$output
10754  );
10755
10756  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10757  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<1>;
10758
10759  let builders = [
10760    OpBuilderDAG<(ins "Value":$tensor, "Value":$shape)>
10761  ];
10762
10763  let verifier = [{
10764    return Verify(*this);
10765  }];
10766
10767  let hasCanonicalizer = 1;
10768  let hasFolder = 1;
10769}
10770
10771def TF_ResizeBilinearOp : TF_Op<"ResizeBilinear", [NoSideEffect]> {
10772  let summary = "Resize `images` to `size` using bilinear interpolation.";
10773
10774  let description = [{
10775Input images can be of different types but output images are always float.
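
For illustration, a sketch through the high-level wrapper (assuming the
usual `tf.image.resize` entry point with `method="bilinear"`):

```python
import tensorflow as tf

images = tf.zeros([1, 4, 4, 3], dtype=tf.uint8)  # any supported input type
resized = tf.image.resize(images, size=[8, 8], method="bilinear")
print(resized.dtype)  # float32, regardless of the input dtype
```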
10776  }];
10777
10778  let arguments = (ins
10779    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$images,
    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
new size for the images.}]>:$size,
10782
10783    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
10784    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
10785  );
10786
10787  let results = (outs
10788    Res<TF_Float32Tensor, [{4-D with shape
10789`[batch, new_height, new_width, channels]`.}]>:$resized_images
10790  );
10791
10792  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10793}
10794
10795def TF_ResizeBilinearGradOp : TF_Op<"ResizeBilinearGrad", [NoSideEffect]> {
10796  let summary = "Computes the gradient of bilinear interpolation.";
10797
10798  let arguments = (ins
10799    Arg<TF_Float32Tensor, [{4-D with shape `[batch, height, width, channels]`.}]>:$grads,
    Arg<TF_FloatTensor, [{4-D with shape `[batch, orig_height, orig_width, channels]`,
the image tensor that was resized.}]>:$original_image,
10802
10803    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
10804    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
10805  );
10806
10807  let results = (outs
10808    Res<TF_FloatTensor, [{4-D with shape `[batch, orig_height, orig_width, channels]`.
10809Gradients with respect to the input image. Input image must have been
10810float or double.}]>:$output
10811  );
10812
10813  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
10814}
10815
10816def TF_ResizeNearestNeighborOp : TF_Op<"ResizeNearestNeighbor", [NoSideEffect]> {
10817  let summary = [{
10818Resize `images` to `size` using nearest neighbor interpolation.
10819  }];
10820
10821  let arguments = (ins
10822    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$images,
    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `new_height, new_width`.  The
new size for the images.}]>:$size,
10825
10826    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
10827    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
10828  );
10829
10830  let results = (outs
10831    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint8]>, [{4-D with shape
10832`[batch, new_height, new_width, channels]`.}]>:$resized_images
10833  );
10834
10835  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10836}
10837
10838def TF_ResizeNearestNeighborGradOp : TF_Op<"ResizeNearestNeighborGrad", [NoSideEffect]> {
10839  let summary = "Computes the gradient of nearest neighbor interpolation.";
10840
10841  let arguments = (ins
10842    Arg<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int8, TF_Uint8]>, [{4-D with shape `[batch, height, width, channels]`.}]>:$grads,
    Arg<TF_Int32Tensor, [{A 1-D int32 Tensor of 2 elements: `orig_height, orig_width`. The
original input size.}]>:$size,
10845
10846    DefaultValuedAttr<BoolAttr, "false">:$align_corners,
10847    DefaultValuedAttr<BoolAttr, "false">:$half_pixel_centers
10848  );
10849
10850  let results = (outs
10851    Res<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int8, TF_Uint8]>, [{4-D with shape `[batch, orig_height, orig_width, channels]`. Gradients
10852with respect to the input image.}]>:$output
10853  );
10854
10855  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
10856}
10857
10858def TF_ResourceApplyAdaMaxOp : TF_Op<"ResourceApplyAdaMax", []> {
10859  let summary = "Update '*var' according to the AdaMax algorithm.";
10860
10861  let description = [{
10862m_t <- beta1 * m_{t-1} + (1 - beta1) * g
10863v_t <- max(beta2 * v_{t-1}, abs(g))
10864variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
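
A NumPy sketch of the update above (a hypothetical standalone helper; the
op itself mutates `var`, `m`, and `v` in place, with `beta1_power = beta1^t`):

```python
import numpy as np

def adamax_update(var, m, v, g, lr, beta1, beta2, beta1_power, epsilon):
  m = beta1 * m + (1 - beta1) * g
  v = np.maximum(beta2 * v, np.abs(g))
  var = var - lr / (1 - beta1_power) * m / (v + epsilon)
  return var, m, v
```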
10865  }];
10866
10867  let arguments = (ins
10868    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10869    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
10870    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$v,
10871    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta1_power,
10872    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10873    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta1,
10874    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta2,
10875    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
10876    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10877
10878    DefaultValuedAttr<BoolAttr, "false">:$use_locking
10879  );
10880
10881  let results = (outs);
10882
10883  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
10884}
10885
10886def TF_ResourceApplyAdadeltaOp : TF_Op<"ResourceApplyAdadelta", []> {
10887  let summary = "Update '*var' according to the adadelta scheme.";
10888
10889  let description = [{
accum = rho * accum + (1 - rho) * grad.square();
update = (update_accum + epsilon).sqrt() * (accum + epsilon).rsqrt() * grad;
update_accum = rho * update_accum + (1 - rho) * update.square();
var -= update;
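
A NumPy sketch of the pseudocode above (a hypothetical helper; note the op
also takes an `lr` input, shown here as an optional scale on `update`):

```python
import numpy as np

def adadelta_update(var, accum, accum_update, g, rho, epsilon, lr=1.0):
  accum = rho * accum + (1 - rho) * np.square(g)
  update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * g
  accum_update = rho * accum_update + (1 - rho) * np.square(update)
  var = var - lr * update
  return var, accum, accum_update
```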
10894  }];
10895
10896  let arguments = (ins
10897    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10898    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
10899    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum_update,
10900    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10901    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay factor. Must be a scalar.}]>:$rho,
10902    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Constant factor. Must be a scalar.}]>:$epsilon,
10903    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10904
10905    DefaultValuedAttr<BoolAttr, "false">:$use_locking
10906  );
10907
10908  let results = (outs);
10909
10910  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
10911}
10912
10913def TF_ResourceApplyAdagradOp : TF_Op<"ResourceApplyAdagrad", []> {
10914  let summary = "Update '*var' according to the adagrad scheme.";
10915
10916  let description = [{
10917accum += grad * grad
10918var -= lr * grad * (1 / sqrt(accum))
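
A NumPy sketch of the two lines above (a hypothetical helper; the op
mutates `var` and `accum` in place):

```python
import numpy as np

def adagrad_update(var, accum, g, lr):
  accum = accum + g * g
  var = var - lr * g / np.sqrt(accum)
  return var, accum
```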
10919  }];
10920
10921  let arguments = (ins
10922    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10923    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
10924    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10925    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10926
10927    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
10928    DefaultValuedAttr<BoolAttr, "true">:$update_slots
10929  );
10930
10931  let results = (outs);
10932
10933  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
10934}
10935
10936def TF_ResourceApplyAdagradDAOp : TF_Op<"ResourceApplyAdagradDA", []> {
10937  let summary = "Update '*var' according to the proximal adagrad scheme.";
10938
10939  let arguments = (ins
10940    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10941    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$gradient_accumulator,
10942    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$gradient_squared_accumulator,
10943    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10944    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10945    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
10946    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
10947    Arg<TF_Int64Tensor, [{Training step number. Must be a scalar.}]>:$global_step,
10948
10949    DefaultValuedAttr<BoolAttr, "false">:$use_locking
10950  );
10951
10952  let results = (outs);
10953
10954  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
10955}
10956
10957def TF_ResourceApplyAdagradV2Op : TF_Op<"ResourceApplyAdagradV2", []> {
10958  let summary = "Update '*var' according to the adagrad scheme.";
10959
10960  let description = [{
10961accum += grad * grad
10962var -= lr * grad * (1 / (sqrt(accum) + epsilon))
10963  }];
10964
10965  let arguments = (ins
10966    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10967    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
10968    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10969    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Constant factor. Must be a scalar.}]>:$epsilon,
10970    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
10971
10972    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
10973    DefaultValuedAttr<BoolAttr, "true">:$update_slots
10974  );
10975
10976  let results = (outs);
10977
10978  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
10979}
10980
10981def TF_ResourceApplyAdamOp : TF_Op<"ResourceApplyAdam", []> {
10982  let summary = "Update '*var' according to the Adam algorithm.";
10983
10984  let description = [{
$$\text{lr}_t := \text{learning\_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$
10986$$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$
10987$$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$
10988$$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$
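
A NumPy sketch of the update equations above (a hypothetical helper;
`beta1_power` and `beta2_power` stand for beta1^t and beta2^t):

```python
import numpy as np

def adam_update(var, m, v, g, lr, beta1, beta2,
                beta1_power, beta2_power, epsilon):
  lr_t = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
  m = beta1 * m + (1 - beta1) * g
  v = beta2 * v + (1 - beta2) * g * g
  var = var - lr_t * m / (np.sqrt(v) + epsilon)
  return var, m, v
```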
10989  }];
10990
10991  let arguments = (ins
10992    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
10993    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
10994    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$v,
10995    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta1_power,
10996    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta2_power,
10997    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
10998    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta1,
10999    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum factor. Must be a scalar.}]>:$beta2,
11000    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
11001    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11002
11003    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11004    DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
11005  );
11006
11007  let results = (outs);
11008
11009  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
11010}
11011
11012def TF_ResourceApplyAddSignOp : TF_Op<"ResourceApplyAddSign", []> {
11013  let summary = "Update '*var' according to the AddSign update.";
11014
11015  let description = [{
11016m_t <- beta1 * m_{t-1} + (1 - beta1) * g
update <- (alpha + sign_decay * sign(g) * sign(m)) * g
11018variable <- variable - lr_t * update
11019  }];
11020
11021  let arguments = (ins
11022    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11023    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
11024    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11025    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$alpha,
11026    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$sign_decay,
11027    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta,
11028    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11029
11030    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11031  );
11032
11033  let results = (outs);
11034
11035  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11036}
11037
11038def TF_ResourceApplyCenteredRMSPropOp : TF_Op<"ResourceApplyCenteredRMSProp", []> {
11039  let summary = "Update '*var' according to the centered RMSProp algorithm.";
11040
11041  let description = [{
11042The centered RMSProp algorithm uses an estimate of the centered second moment
11043(i.e., the variance) for normalization, as opposed to regular RMSProp, which
11044uses the (uncentered) second moment. This often helps with training, but is
11045slightly more expensive in terms of computation and memory.
11046
Note that in the dense implementation of this algorithm, `mg`, `ms`, and `mom`
will update even if `grad` is zero, but in this sparse implementation, `mg`,
`ms`, and `mom` will not update in iterations during which `grad` is zero.
11050
11051mean_square = decay * mean_square + (1-decay) * gradient ** 2
11052mean_grad = decay * mean_grad + (1-decay) * gradient
11053
11054Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
11055
11056mg <- rho * mg_{t-1} + (1-rho) * grad
11057ms <- rho * ms_{t-1} + (1-rho) * grad * grad
11058mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
11059var <- var - mom
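
A NumPy sketch of the dense update above (a hypothetical helper; the op
mutates `var`, `mg`, `ms`, and `mom` in place):

```python
import numpy as np

def centered_rmsprop_update(var, mg, ms, mom, g, lr, rho, momentum, epsilon):
  mg = rho * mg + (1 - rho) * g
  ms = rho * ms + (1 - rho) * g * g
  mom = momentum * mom + lr * g / np.sqrt(ms - mg * mg + epsilon)
  var = var - mom
  return var, mg, ms, mom
```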
11060  }];
11061
11062  let arguments = (ins
11063    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11064    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mg,
11065    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$ms,
11066    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mom,
11067    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11068    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay rate. Must be a scalar.}]>:$rho,
11069    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum Scale. Must be a scalar.}]>:$momentum,
11070    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
11071    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11072
11073    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11074  );
11075
11076  let results = (outs);
11077
11078  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<4>;
11079}
11080
11081def TF_ResourceApplyFtrlOp : TF_Op<"ResourceApplyFtrl", []> {
11082  let summary = "Update '*var' according to the Ftrl-proximal scheme.";
11083
11084  let description = [{
11085accum_new = accum + grad * grad
11086linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
11087quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
11088var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
11089accum = accum_new
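
A NumPy sketch of the scheme above (a hypothetical helper; `lr_power` is
typically -0.5, making `accum^(-lr_power)` a square root):

```python
import numpy as np

def ftrl_update(var, accum, linear, g, lr, l1, l2, lr_power):
  accum_new = accum + g * g
  linear = linear + g - (accum_new**(-lr_power)
                         - accum**(-lr_power)) / lr * var
  quadratic = 1.0 / (accum_new**lr_power * lr) + 2 * l2
  var = np.where(np.abs(linear) > l1,
                 (np.sign(linear) * l1 - linear) / quadratic, 0.0)
  return var, accum_new, linear
```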
11090  }];
11091
11092  let arguments = (ins
11093    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11094    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11095    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$linear,
11096    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11097    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11098    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
11099    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
11100    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr_power,
11101
11102    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11103    DefaultValuedAttr<BoolAttr, "false">:$multiply_linear_by_lr
11104  );
11105
11106  let results = (outs);
11107
11108  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
11109}
11110
11111def TF_ResourceApplyFtrlV2Op : TF_Op<"ResourceApplyFtrlV2", []> {
11112  let summary = "Update '*var' according to the Ftrl-proximal scheme.";
11113
11114  let description = [{
11115grad_with_shrinkage = grad + 2 * l2_shrinkage * var
11116accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
linear += grad_with_shrinkage -
    (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
11119quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
11120var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0
11121accum = accum_new
11122  }];
11123
11124  let arguments = (ins
11125    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11126    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11127    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$linear,
11128    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11129    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11130    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
11131    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 shrinkage regularization. Must be a scalar.}]>:$l2,
11132    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$l2_shrinkage,
11133    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr_power,
11134
11135    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11136    DefaultValuedAttr<BoolAttr, "false">:$multiply_linear_by_lr
11137  );
11138
11139  let results = (outs);
11140
11141  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
11142}
11143
11144def TF_ResourceApplyGradientDescentOp : TF_Op<"ResourceApplyGradientDescent", []> {
11145  let summary = "Update '*var' by subtracting 'alpha' * 'delta' from it.";
11146
11147  let arguments = (ins
11148    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11149    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$alpha,
11150    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The change.}]>:$delta,
11151
11152    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11153  );
11154
11155  let results = (outs);
11156
11157  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
11158}
11159
11160def TF_ResourceApplyKerasMomentumOp : TF_Op<"ResourceApplyKerasMomentum", []> {
11161  let summary = "Update '*var' according to the momentum scheme.";
11162
11163  let description = [{
11164Set use_nesterov = True if you want to use Nesterov momentum.
11165
11166accum = accum * momentum - lr * grad
11167var += accum
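
A NumPy sketch of the default (`use_nesterov = false`) path above (a
hypothetical helper; the op mutates `var` and `accum` in place):

```python
import numpy as np

def keras_momentum_update(var, accum, g, lr, momentum):
  accum = accum * momentum - lr * g
  var = var + accum
  return var, accum
```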
11168  }];
11169
11170  let arguments = (ins
11171    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11172    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11173    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11174    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11175    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum. Must be a scalar.}]>:$momentum,
11176
11177    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11178    DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
11179  );
11180
11181  let results = (outs);
11182
11183  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11184}
11185
11186def TF_ResourceApplyMomentumOp : TF_Op<"ResourceApplyMomentum", []> {
11187  let summary = "Update '*var' according to the momentum scheme.";
11188
11189  let description = [{
11190Set use_nesterov = True if you want to use Nesterov momentum.
11191
11192accum = accum * momentum + grad
11193var -= lr * accum
11194  }];
11195
11196  let arguments = (ins
11197    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11198    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11199    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11200    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11201    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Momentum. Must be a scalar.}]>:$momentum,
11202
11203    DefaultValuedAttr<BoolAttr, "false">:$use_locking,
11204    DefaultValuedAttr<BoolAttr, "false">:$use_nesterov
11205  );
11206
11207  let results = (outs);
11208
11209  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11210}
11211
11212def TF_ResourceApplyPowerSignOp : TF_Op<"ResourceApplyPowerSign", []> {
11213  let summary = "Update '*var' according to the AddSign update.";
11214
11215  let description = [{
11216m_t <- beta1 * m_{t-1} + (1 - beta1) * g
11217update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g
11218variable <- variable - lr_t * update
11219  }];
11220
11221  let arguments = (ins
11222    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11223    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$m,
11224    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11225    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$logbase,
11226    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$sign_decay,
11227    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Must be a scalar.}]>:$beta,
11228    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11229
11230    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11231  );
11232
11233  let results = (outs);
11234
11235  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11236}
11237
11238def TF_ResourceApplyProximalAdagradOp : TF_Op<"ResourceApplyProximalAdagrad", []> {
11239  let summary = [{
11240Update '*var' and '*accum' according to FOBOS with Adagrad learning rate.
11241  }];
11242
11243  let description = [{
11244accum += grad * grad
11245prox_v = var - lr * grad * (1 / sqrt(accum))
var = sign(prox_v) / (1 + lr * l2) * max{|prox_v| - lr * l1, 0}
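
A NumPy sketch of the FOBOS update above (a hypothetical helper; the op
mutates `var` and `accum` in place):

```python
import numpy as np

def proximal_adagrad_update(var, accum, g, lr, l1, l2):
  accum = accum + g * g
  prox_v = var - lr * g / np.sqrt(accum)
  var = (np.sign(prox_v) / (1 + lr * l2)
         * np.maximum(np.abs(prox_v) - lr * l1, 0.0))
  return var, accum
```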
11247  }];
11248
11249  let arguments = (ins
11250    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11251    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$accum,
11252    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11253    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
11254    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
11255    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11256
11257    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11258  );
11259
11260  let results = (outs);
11261
11262  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11263}
11264
11265def TF_ResourceApplyProximalGradientDescentOp : TF_Op<"ResourceApplyProximalGradientDescent", []> {
11266  let summary = "Update '*var' as FOBOS algorithm with fixed learning rate.";
11267
11268  let description = [{
11269prox_v = var - alpha * delta
var = sign(prox_v) / (1 + alpha * l2) * max{|prox_v| - alpha * l1, 0}
11271  }];
11272
11273  let arguments = (ins
11274    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11275    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$alpha,
11276    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L1 regularization. Must be a scalar.}]>:$l1,
11277    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{L2 regularization. Must be a scalar.}]>:$l2,
11278    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The change.}]>:$delta,
11279
11280    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11281  );
11282
11283  let results = (outs);
11284
11285  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
11286}
11287
11288def TF_ResourceApplyRMSPropOp : TF_Op<"ResourceApplyRMSProp", []> {
11289  let summary = "Update '*var' according to the RMSProp algorithm.";
11290
11291  let description = [{
Note that in the dense implementation of this algorithm, `ms` and `mom` will
update even if `grad` is zero, but in this sparse implementation, `ms`
and `mom` will not update in iterations during which `grad` is zero.
11295
11296mean_square = decay * mean_square + (1-decay) * gradient ** 2
11297Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
11298
11299ms <- rho * ms_{t-1} + (1-rho) * grad * grad
11300mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
11301var <- var - mom
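
A NumPy sketch of the dense update above (a hypothetical helper; the op
mutates `var`, `ms`, and `mom` in place):

```python
import numpy as np

def rmsprop_update(var, ms, mom, g, lr, rho, momentum, epsilon):
  ms = rho * ms + (1 - rho) * g * g
  mom = momentum * mom + lr * g / np.sqrt(ms + epsilon)
  var = var - mom
  return var, ms, mom
```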
11302  }];
11303
11304  let arguments = (ins
11305    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$var,
11306    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$ms,
11307    Arg<TF_ResourceTensor, [{Should be from a Variable().}], [TF_VariableRead, TF_VariableWrite]>:$mom,
11308    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Scaling factor. Must be a scalar.}]>:$lr,
11309    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Decay rate. Must be a scalar.}]>:$rho,
11310    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$momentum,
11311    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Ridge term. Must be a scalar.}]>:$epsilon,
11312    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The gradient.}]>:$grad,
11313
11314    DefaultValuedAttr<BoolAttr, "false">:$use_locking
11315  );
11316
11317  let results = (outs);
11318
11319  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
11320}
11321
11322def TF_ResourceGatherOp : TF_Op<"ResourceGather", []> {
11323  let summary = [{
11324Gather slices from the variable pointed to by `resource` according to `indices`.
11325  }];
11326
11327  let description = [{
11328`indices` must be an integer tensor of any dimension (usually 0-D or 1-D).
11329Produces an output tensor with shape `indices.shape + params.shape[1:]` where:
11330
```python
    # Scalar indices
    output[:, ..., :] = params[indices, :, ..., :]

    # Vector indices
    output[i, :, ..., :] = params[indices[i], :, ..., :]

    # Higher rank indices
    output[i, ..., j, :, ..., :] = params[indices[i, ..., j], :, ..., :]
```
11341  }];
11342
11343  let arguments = (ins
11344    Arg<TF_ResourceTensor, "", [TF_VariableRead]>:$resource,
11345    TF_I32OrI64Tensor:$indices,
11346
11347    DefaultValuedAttr<I64Attr, "0">:$batch_dims,
11348    DefaultValuedAttr<BoolAttr, "true">:$validate_indices
11349  );
11350
11351  let results = (outs
11352    TF_Tensor:$output
11353  );
11354
11355  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11356  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
11357}
11358
11359def TF_ResourceScatterAddOp : TF_Op<"ResourceScatterAdd", []> {
11360  let summary = "Adds sparse updates to the variable referenced by `resource`.";
11361
11362  let description = [{
11363This operation computes
11364
11365    # Scalar indices
11366    ref[indices, ...] += updates[...]
11367
11368    # Vector indices (for each i)
11369    ref[indices[i], ...] += updates[i, ...]
11370
11371    # High rank indices (for each i, ..., j)
11372    ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
11373
11374Duplicate entries are handled correctly: if multiple `indices` reference
11375the same location, their contributions add.
11376
11377Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11378
11379<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11380<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11381</div>
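
For instance, a small eager-mode sketch via `tf.Variable.scatter_add`, which
typically lowers to this op; the duplicated index 0 receives both of its
contributions:

```python
import tensorflow as tf

v = tf.Variable([1, 2, 3, 4])
v.scatter_add(tf.IndexedSlices(values=[10, 20, 30], indices=[0, 2, 0]))
print(v.numpy())  # [41  2 23  4]
```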
11382  }];
11383
11384  let arguments = (ins
11385    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11386    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
11387    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to add to `ref`.}]>:$updates
11388  );
11389
11390  let results = (outs);
11391
11392  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11393  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11394}
11395
11396def TF_ResourceScatterDivOp : TF_Op<"ResourceScatterDiv", []> {
11397  let summary = [{
11398Divides sparse updates into the variable referenced by `resource`.
11399  }];
11400
11401  let description = [{
11402This operation computes
11403
11404    # Scalar indices
11405    ref[indices, ...] /= updates[...]
11406
11407    # Vector indices (for each i)
11408    ref[indices[i], ...] /= updates[i, ...]
11409
11410    # High rank indices (for each i, ..., j)
11411    ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
11412
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions multiply (that is, `ref` is divided
by the product of the updates at that location).
11415
11416Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11417
11418<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11419<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11420</div>
11421  }];
11422
11423  let arguments = (ins
11424    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11425    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of values to divide `ref` by.}]>:$updates
11427  );
11428
11429  let results = (outs);
11430
11431  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11432  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11433}
11434
11435def TF_ResourceScatterMaxOp : TF_Op<"ResourceScatterMax", []> {
11436  let summary = [{
11437Reduces sparse updates into the variable referenced by `resource` using the `max` operation.
11438  }];
11439
11440  let description = [{
11441This operation computes
11442
11443    # Scalar indices
11444    ref[indices, ...] = max(ref[indices, ...], updates[...])
11445
11446    # Vector indices (for each i)
11447    ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
11448
11449    # High rank indices (for each i, ..., j)
11450    ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
11451
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions are combined using `max`.
11454
11455Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11456
11457<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11458<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11459</div>
11460  }];
11461
11462  let arguments = (ins
11463    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11464    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to reduce into `ref` using the `max` operation.}]>:$updates
11466  );
11467
11468  let results = (outs);
11469
11470  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11471  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11472}
11473
11474def TF_ResourceScatterMinOp : TF_Op<"ResourceScatterMin", []> {
11475  let summary = [{
11476Reduces sparse updates into the variable referenced by `resource` using the `min` operation.
11477  }];
11478
11479  let description = [{
11480This operation computes
11481
11482    # Scalar indices
11483    ref[indices, ...] = min(ref[indices, ...], updates[...])
11484
11485    # Vector indices (for each i)
11486    ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
11487
11488    # High rank indices (for each i, ..., j)
11489    ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
11490
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions are combined using `min`.
11493
11494Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11495
11496<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11497<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11498</div>
11499  }];
11500
11501  let arguments = (ins
11502    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11503    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to reduce into `ref` using the `min` operation.}]>:$updates
11505  );
11506
11507  let results = (outs);
11508
11509  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11510  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11511}
11512
11513def TF_ResourceScatterMulOp : TF_Op<"ResourceScatterMul", []> {
11514  let summary = [{
11515Multiplies sparse updates into the variable referenced by `resource`.
11516  }];
11517
11518  let description = [{
11519This operation computes
11520
11521    # Scalar indices
11522    ref[indices, ...] *= updates[...]
11523
11524    # Vector indices (for each i)
11525    ref[indices[i], ...] *= updates[i, ...]
11526
11527    # High rank indices (for each i, ..., j)
11528    ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
11529
11530Duplicate entries are handled correctly: if multiple `indices` reference
11531the same location, their contributions multiply.
11532
11533Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11534
11535<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11536<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11537</div>
11538  }];
11539
11540  let arguments = (ins
11541    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11542    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to multiply into `ref`.}]>:$updates
11544  );
11545
11546  let results = (outs);
11547
11548  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11549  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11550}
11551
11552def TF_ResourceScatterNdAddOp : TF_Op<"ResourceScatterNdAdd", []> {
11553  let summary = [{
11554Applies sparse addition to individual values or slices in a Variable.
11555  }];
11556
11557  let description = [{
11558`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
11559
`indices` must be an integer tensor containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
11562
11563The innermost dimension of `indices` (with length `K`) corresponds to
11564indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
11565dimension of `ref`.
11566
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
11568
11569```
11570[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
11571```
11572
For example, say we want to add 4 scattered elements to a rank-1 tensor with
8 elements. In Python, that addition would look like this:
11575
11576```python
11577ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
11578indices = tf.constant([[4], [3], [1], [7]])
11579updates = tf.constant([9, 10, 11, 12])
11580add = tf.scatter_nd_add(ref, indices, updates)
11581with tf.Session() as sess:
  print(sess.run(add))
11583```
11584
11585The resulting update to ref would look like this:
11586
11587    [1, 13, 3, 14, 14, 6, 7, 20]
11588
11589See `tf.scatter_nd` for more details about how to make updates to
11590slices.
11591  }];
11592
11593  let arguments = (ins
11594    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
11595    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
11596A tensor of indices into ref.}]>:$indices,
11597    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of
11598values to add to ref.}]>:$updates,
11599
11600    DefaultValuedAttr<BoolAttr, "true">:$use_locking
11601  );
11602
11603  let results = (outs);
11604
11605  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11606  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11607}
11608
11609def TF_ResourceScatterNdSubOp : TF_Op<"ResourceScatterNdSub", []> {
11610  let summary = [{
11611Applies sparse subtraction to individual values or slices in a Variable.
11612  }];
11613
11614  let description = [{
11615`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
11616
`indices` must be an integer tensor containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
11619
11620The innermost dimension of `indices` (with length `K`) corresponds to
11621indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
11622dimension of `ref`.
11623
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
11625
11626```
11627[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
11628```
11629
11630For example, say we want to subtract 4 scattered elements from a rank-1 tensor
11631with 8 elements. In Python, that subtraction would look like this:
11632
11633```python
11634ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
11635indices = tf.constant([[4], [3], [1], [7]])
11636updates = tf.constant([9, 10, 11, 12])
11637sub = tf.scatter_nd_sub(ref, indices, updates)
11638with tf.Session() as sess:
  print(sess.run(sub))
11640```
11641
11642The resulting update to ref would look like this:
11643
11644    [1, -9, 3, -6, -4, 6, 7, -4]
11645
11646See `tf.scatter_nd` for more details about how to make updates to
11647slices.
11648  }];
11649
11650  let arguments = (ins
11651    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
11652    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
11653A tensor of indices into ref.}]>:$indices,
    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of
values to subtract from ref.}]>:$updates,
11656
11657    DefaultValuedAttr<BoolAttr, "true">:$use_locking
11658  );
11659
11660  let results = (outs);
11661
11662  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11663  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11664}
11665
11666def TF_ResourceScatterNdUpdateOp : TF_Op<"ResourceScatterNdUpdate", []> {
11667  let summary = [{
Applies sparse `updates` to individual values or slices within a given variable according to `indices`.
11669  }];
11670
11671  let description = [{
11674`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
11675
`indices` must be an integer tensor containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
11678
11679The innermost dimension of `indices` (with length `K`) corresponds to
11680indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
11681dimension of `ref`.
11682
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
11684
11685```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
11687```
11688
For example, say we want to update 4 scattered elements in a rank-1 tensor
with 8 elements. In Python, that update would look like this:
11691
11692```python
11693    ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
    indices = tf.constant([[4], [3], [1], [7]])
11695    updates = tf.constant([9, 10, 11, 12])
11696    update = tf.scatter_nd_update(ref, indices, updates)
11697    with tf.Session() as sess:
      print(sess.run(update))
11699```
11700
11701The resulting update to ref would look like this:
11702
11703    [1, 11, 3, 10, 9, 6, 7, 12]
11704
11705See `tf.scatter_nd` for more details about how to make updates to
11706slices.
11707  }];
11708
11709  let arguments = (ins
11710    Arg<TF_ResourceTensor, [{A resource handle. Must be from a VarHandleOp.}], [TF_VariableRead, TF_VariableWrite]>:$ref,
11711    Arg<TF_I32OrI64Tensor, [{A Tensor. Must be one of the following types: int32, int64.
11712A tensor of indices into ref.}]>:$indices,
    Arg<TF_Tensor, [{A Tensor. Must have the same type as ref. A tensor of updated
values to assign to ref.}]>:$updates,
11715
11716    DefaultValuedAttr<BoolAttr, "true">:$use_locking
11717  );
11718
11719  let results = (outs);
11720
11721  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11722  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
11723}
11724
11725def TF_ResourceScatterSubOp : TF_Op<"ResourceScatterSub", []> {
11726  let summary = [{
11727Subtracts sparse updates from the variable referenced by `resource`.
11728  }];
11729
11730  let description = [{
11731This operation computes
11732
11733    # Scalar indices
11734    ref[indices, ...] -= updates[...]
11735
11736    # Vector indices (for each i)
11737    ref[indices[i], ...] -= updates[i, ...]
11738
11739    # High rank indices (for each i, ..., j)
11740    ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
11741
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions add (the updates at that location are
summed and then subtracted from `ref`).
11744
11745Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`.
11746
11747<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
11748<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
11749</div>
11750  }];
11751
11752  let arguments = (ins
11753    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11754    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{A tensor of updated values to subtract from `ref`.}]>:$updates
11756  );
11757
11758  let results = (outs);
11759
11760  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11761  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11762}
11763
11764def TF_ResourceScatterUpdateOp : TF_Op<"ResourceScatterUpdate", []> {
11765  let summary = [{
11766Assigns sparse updates to the variable referenced by `resource`.
11767  }];
11768
11769  let description = [{
11770This operation computes
11771
11772    # Scalar indices
11773    ref[indices, ...] = updates[...]
11774
11775    # Vector indices (for each i)
11776    ref[indices[i], ...] = updates[i, ...]
11777
11778    # High rank indices (for each i, ..., j)
11779    ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
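
For instance, a small eager-mode sketch via `tf.Variable.scatter_update`,
which typically lowers to this op:

```python
import tensorflow as tf

v = tf.Variable([1, 2, 3, 4])
v.scatter_update(tf.IndexedSlices(values=[9, 8], indices=[0, 3]))
print(v.numpy())  # [9 2 3 8]
```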
11780  }];
11781
11782  let arguments = (ins
11783    Arg<TF_ResourceTensor, [{Should be from a `Variable` node.}], [TF_VariableRead, TF_VariableWrite]>:$resource,
11784    Arg<TF_I32OrI64Tensor, [{A tensor of indices into the first dimension of `ref`.}]>:$indices,
    Arg<TF_Tensor, [{A tensor of updated values to store in `ref`.}]>:$updates
11786  );
11787
11788  let results = (outs);
11789
11790  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
11791  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
11792}
11793
11794def TF_ResourceStridedSliceAssignOp : TF_Op<"ResourceStridedSliceAssign", []> {
11795  let summary = "Assign `value` to the sliced l-value reference of `ref`.";
11796
11797  let description = [{
The values of `value` are assigned to the positions in the variable
`ref` that are selected by the slice parameters. The slice parameters
`begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
11801
NOTE: this op currently does not support broadcasting, so `value`'s
shape must be exactly the shape produced by the slice of `ref`.
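
For instance, a sketch using Python slice assignment on a variable, which is
backed by this op for resource variables (shapes here are illustrative):

```python
import tensorflow as tf

v = tf.Variable([0, 0, 0, 0, 0, 0])
# Assigns to v[1:5:2]; `value` must match the slice's shape exactly.
v[1:5:2].assign([7, 9])
print(v.numpy())  # [0 7 0 9 0 0]
```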
11804  }];
11805
11806  let arguments = (ins
11807    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$ref,
11808    TF_I32OrI64Tensor:$begin,
11809    TF_I32OrI64Tensor:$end,
11810    TF_I32OrI64Tensor:$strides,
11811    TF_Tensor:$value,
11812
11813    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
11814    DefaultValuedAttr<I64Attr, "0">:$end_mask,
11815    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
11816    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
11817    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
11818  );
11819
11820  let results = (outs);
11821
11822  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<4>;
11823  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
11824}
11825
11826def TF_RestoreOp : TF_Op<"Restore", []> {
11827  let summary = "Restores a tensor from checkpoint files.";
11828
11829  let description = [{
11830Reads a tensor stored in one or several files. If there are several files (for
11831instance because a tensor was saved as slices), `file_pattern` may contain
11832wildcard symbols (`*` and `?`) in the filename portion only, not in the
11833directory portion.
11834
11835If a `file_pattern` matches several files, `preferred_shard` can be used to hint
11836in which file the requested tensor is likely to be found. This op will first
11837open the file at index `preferred_shard` in the list of matching files and try
to restore tensors from that file.  Only if some tensors or tensor slices are
not found in that first file does the Op open all the files. Setting
11840`preferred_shard` to match the value passed as the `shard` input
11841of a matching `Save` Op may speed up Restore.  This attribute only affects
11842performance, not correctness.  The default value -1 means files are processed in
11843order.
11844
11845See also `RestoreSlice`.
11846  }];
11847
11848  let arguments = (ins
11849    Arg<TF_StrTensor, [{Must have a single element. The pattern of the files from
11850which we read the tensor.}]>:$file_pattern,
11851    Arg<TF_StrTensor, [{Must have a single element. The name of the tensor to be
11852restored.}]>:$tensor_name,
11853
11854    DefaultValuedAttr<I64Attr, "-1">:$preferred_shard
11855  );
11856
11857  let results = (outs
11858    Res<TF_Tensor, [{The restored tensor.}]>:$tensor
11859  );
11860
11861  TF_DerivedResultTypeAttr dt = TF_DerivedResultTypeAttr<0>;
11862}
11863
11864def TF_RestoreV2Op : TF_Op<"RestoreV2", []> {
11865  let summary = "Restores tensors from a V2 checkpoint.";
11866
11867  let description = [{
11868For backward compatibility with the V1 format, this Op currently allows
11869restoring from a V1 checkpoint as well:
  - This Op first attempts to find the V2 index file pointed to by "prefix", and
    if found, proceeds to read it as a V2 checkpoint;
11872  - Otherwise the V1 read path is invoked.
11873Relying on this behavior is not recommended, as the ability to fall back to read
11874V1 might be deprecated and eventually removed.
11875
11876By default, restores the named tensors in full.  If the caller wishes to restore
11877specific slices of stored tensors, "shape_and_slices" should be non-empty
11878strings and correspondingly well-formed.
11879
11880Callers must ensure all the named tensors are indeed stored in the checkpoint.
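
For instance, a hedged sketch via `tf.raw_ops.RestoreV2`; the prefix and
tensor names below are hypothetical and assume a matching checkpoint exists:

```python
import tensorflow as tf

kernel, bias = tf.raw_ops.RestoreV2(
    prefix="/tmp/ckpt/model",                 # hypothetical checkpoint prefix
    tensor_names=["dense/kernel", "dense/bias"],
    shape_and_slices=["", ""],                # "" restores each tensor in full
    dtypes=[tf.float32, tf.float32])
```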
11881  }];
11882
11883  let arguments = (ins
11884    Arg<TF_StrTensor, [{Must have a single element.  The prefix of a V2 checkpoint.}]>:$prefix,
11885    Arg<TF_StrTensor, [{shape {N}.  The names of the tensors to be restored.}]>:$tensor_names,
11886    Arg<TF_StrTensor, [{shape {N}.  The slice specs of the tensors to be restored.
11887Empty strings indicate that they are non-partitioned tensors.}]>:$shape_and_slices
11888  );
11889
11890  let results = (outs
11891    Res<Variadic<TF_Tensor>, [{shape {N}.  The restored tensors, whose shapes are read from the
11892checkpoint directly.}]>:$tensors
11893  );
11894
11895  TF_DerivedResultTypeListAttr dtypes = TF_DerivedResultTypeListAttr<0>;
11896}
11897
11898def TF_RetrieveTPUEmbeddingADAMParametersOp : TF_Op<"RetrieveTPUEmbeddingADAMParameters", [TF_TPUEmbeddingSideEffect]> {
11899  let summary = "Retrieve ADAM embedding parameters.";
11900
11901  let description = [{
11902An op that retrieves optimization parameters from embedding to host
11903memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11904the correct embedding table configuration. For example, this op is
11905used to retrieve updated parameters before saving a checkpoint.
11906  }];
11907
11908  let arguments = (ins
11909    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11910    StrAttr:$table_name,
11911    I64Attr:$num_shards,
11912    I64Attr:$shard_id,
11913    StrAttr:$config
11914  );
11915
11916  let results = (outs
11917    Res<TF_Float32Tensor, [{Parameter parameters updated by the ADAM optimization algorithm.}]>:$parameters,
11918    Res<TF_Float32Tensor, [{Parameter momenta updated by the ADAM optimization algorithm.}]>:$momenta,
11919    Res<TF_Float32Tensor, [{Parameter velocities updated by the ADAM optimization algorithm.}]>:$velocities
11920  );
11921}
11922
11923def TF_RetrieveTPUEmbeddingADAMParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingADAMParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
11924  let summary = "Retrieve ADAM embedding parameters with debug support.";
11925
11926  let description = [{
11927An op that retrieves optimization parameters from embedding to host
11928memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11929the correct embedding table configuration. For example, this op is
11930used to retrieve updated parameters before saving a checkpoint.
11931  }];
11932
11933  let arguments = (ins
11934    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11935    StrAttr:$table_name,
11936    I64Attr:$num_shards,
11937    I64Attr:$shard_id,
11938    StrAttr:$config
11939  );
11940
11941  let results = (outs
11942    Res<TF_Float32Tensor, [{Parameter parameters updated by the ADAM optimization algorithm.}]>:$parameters,
11943    Res<TF_Float32Tensor, [{Parameter momenta updated by the ADAM optimization algorithm.}]>:$momenta,
11944    Res<TF_Float32Tensor, [{Parameter velocities updated by the ADAM optimization algorithm.}]>:$velocities,
11945    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the ADAM optimization algorithm.}]>:$gradient_accumulators
11946  );
11947}
11948
11949def TF_RetrieveTPUEmbeddingAdadeltaParametersOp : TF_Op<"RetrieveTPUEmbeddingAdadeltaParameters", [TF_TPUEmbeddingSideEffect]> {
11950  let summary = "Retrieve Adadelta embedding parameters.";
11951
11952  let description = [{
11953An op that retrieves optimization parameters from embedding to host
11954memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11955the correct embedding table configuration. For example, this op is
11956used to retrieve updated parameters before saving a checkpoint.
11957  }];
11958
11959  let arguments = (ins
11960    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11961    StrAttr:$table_name,
11962    I64Attr:$num_shards,
11963    I64Attr:$shard_id,
11964    StrAttr:$config
11965  );
11966
11967  let results = (outs
11968    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adadelta optimization algorithm.}]>:$parameters,
11969    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adadelta optimization algorithm.}]>:$accumulators,
11970    Res<TF_Float32Tensor, [{Parameter updates updated by the Adadelta optimization algorithm.}]>:$updates
11971  );
11972}
11973
11974def TF_RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
11975  let summary = "Retrieve Adadelta embedding parameters with debug support.";
11976
11977  let description = [{
11978An op that retrieves optimization parameters from embedding to host
11979memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
11980the correct embedding table configuration. For example, this op is
11981used to retrieve updated parameters before saving a checkpoint.
11982  }];
11983
11984  let arguments = (ins
11985    DefaultValuedAttr<I64Attr, "-1">:$table_id,
11986    StrAttr:$table_name,
11987    I64Attr:$num_shards,
11988    I64Attr:$shard_id,
11989    StrAttr:$config
11990  );
11991
11992  let results = (outs
11993    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adadelta optimization algorithm.}]>:$parameters,
11994    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adadelta optimization algorithm.}]>:$accumulators,
11995    Res<TF_Float32Tensor, [{Parameter updates updated by the Adadelta optimization algorithm.}]>:$updates,
11996    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the Adadelta optimization algorithm.}]>:$gradient_accumulators
11997  );
11998}
11999
12000def TF_RetrieveTPUEmbeddingAdagradParametersOp : TF_Op<"RetrieveTPUEmbeddingAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
12001  let summary = "Retrieve Adagrad embedding parameters.";
12002
12003  let description = [{
12004An op that retrieves optimization parameters from embedding to host
12005memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12006the correct embedding table configuration. For example, this op is
12007used to retrieve updated parameters before saving a checkpoint.
12008  }];
12009
12010  let arguments = (ins
12011    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12012    StrAttr:$table_name,
12013    I64Attr:$num_shards,
12014    I64Attr:$shard_id,
12015    StrAttr:$config
12016  );
12017
12018  let results = (outs
12019    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adagrad optimization algorithm.}]>:$parameters,
12020    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adagrad optimization algorithm.}]>:$accumulators
12021  );
12022}
12023
12024def TF_RetrieveTPUEmbeddingAdagradParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12025  let summary = "Retrieve Adagrad embedding parameters with debug support.";
12026
12027  let description = [{
12028An op that retrieves optimization parameters from embedding to host
12029memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12030the correct embedding table configuration. For example, this op is
12031used to retrieve updated parameters before saving a checkpoint.
12032  }];
12033
12034  let arguments = (ins
12035    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12036    StrAttr:$table_name,
12037    I64Attr:$num_shards,
12038    I64Attr:$shard_id,
12039    StrAttr:$config
12040  );
12041
12042  let results = (outs
12043    Res<TF_Float32Tensor, [{Parameter parameters updated by the Adagrad optimization algorithm.}]>:$parameters,
12044    Res<TF_Float32Tensor, [{Parameter accumulators updated by the Adagrad optimization algorithm.}]>:$accumulators,
12045    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the Adagrad optimization algorithm.}]>:$gradient_accumulators
12046  );
12047}
12048
12049def TF_RetrieveTPUEmbeddingCenteredRMSPropParametersOp : TF_Op<"RetrieveTPUEmbeddingCenteredRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
12050  let summary = "Retrieve centered RMSProp embedding parameters.";
12051
12052  let description = [{
12053An op that retrieves optimization parameters from embedding to host
12054memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12055the correct embedding table configuration. For example, this op is
12056used to retrieve updated parameters before saving a checkpoint.
12057  }];
12058
12059  let arguments = (ins
12060    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12061    StrAttr:$table_name,
12062    I64Attr:$num_shards,
12063    I64Attr:$shard_id,
12064    StrAttr:$config
12065  );
12066
12067  let results = (outs
12068    Res<TF_Float32Tensor, [{Parameter parameters updated by the centered RMSProp optimization algorithm.}]>:$parameters,
12069    Res<TF_Float32Tensor, [{Parameter ms updated by the centered RMSProp optimization algorithm.}]>:$ms,
12070    Res<TF_Float32Tensor, [{Parameter mom updated by the centered RMSProp optimization algorithm.}]>:$mom,
12071    Res<TF_Float32Tensor, [{Parameter mg updated by the centered RMSProp optimization algorithm.}]>:$mg
12072  );
12073}
12074
12075def TF_RetrieveTPUEmbeddingFTRLParametersOp : TF_Op<"RetrieveTPUEmbeddingFTRLParameters", [TF_TPUEmbeddingSideEffect]> {
12076  let summary = "Retrieve FTRL embedding parameters.";
12077
12078  let description = [{
12079An op that retrieves optimization parameters from embedding to host
12080memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12081the correct embedding table configuration. For example, this op is
12082used to retrieve updated parameters before saving a checkpoint.
12083  }];
12084
12085  let arguments = (ins
12086    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12087    StrAttr:$table_name,
12088    I64Attr:$num_shards,
12089    I64Attr:$shard_id,
12090    StrAttr:$config
12091  );
12092
12093  let results = (outs
12094    Res<TF_Float32Tensor, [{Parameter parameters updated by the FTRL optimization algorithm.}]>:$parameters,
12095    Res<TF_Float32Tensor, [{Parameter accumulators updated by the FTRL optimization algorithm.}]>:$accumulators,
12096    Res<TF_Float32Tensor, [{Parameter linears updated by the FTRL optimization algorithm.}]>:$linears
12097  );
12098}
12099
12100def TF_RetrieveTPUEmbeddingFTRLParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingFTRLParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12101  let summary = "Retrieve FTRL embedding parameters with debug support.";
12102
12103  let description = [{
12104An op that retrieves optimization parameters from embedding to host
12105memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12106the correct embedding table configuration. For example, this op is
12107used to retrieve updated parameters before saving a checkpoint.
12108  }];
12109
12110  let arguments = (ins
12111    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12112    StrAttr:$table_name,
12113    I64Attr:$num_shards,
12114    I64Attr:$shard_id,
12115    StrAttr:$config
12116  );
12117
12118  let results = (outs
12119    Res<TF_Float32Tensor, [{Parameter parameters updated by the FTRL optimization algorithm.}]>:$parameters,
12120    Res<TF_Float32Tensor, [{Parameter accumulators updated by the FTRL optimization algorithm.}]>:$accumulators,
12121    Res<TF_Float32Tensor, [{Parameter linears updated by the FTRL optimization algorithm.}]>:$linears,
12122    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the FTRL optimization algorithm.}]>:$gradient_accumulators
12123  );
12124}
12125
12126def TF_RetrieveTPUEmbeddingMDLAdagradLightParametersOp : TF_Op<"RetrieveTPUEmbeddingMDLAdagradLightParameters", [TF_TPUEmbeddingSideEffect]> {
12127  let summary = "Retrieve MDL Adagrad Light embedding parameters.";
12128
12129  let description = [{
12130An op that retrieves optimization parameters from embedding to host
12131memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12132the correct embedding table configuration. For example, this op is
12133used to retrieve updated parameters before saving a checkpoint.
12134  }];
12135
12136  let arguments = (ins
12137    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12138    StrAttr:$table_name,
12139    I64Attr:$num_shards,
12140    I64Attr:$shard_id,
12141    StrAttr:$config
12142  );
12143
12144  let results = (outs
12145    Res<TF_Float32Tensor, [{Parameter parameters updated by the MDL Adagrad Light optimization algorithm.}]>:$parameters,
12146    Res<TF_Float32Tensor, [{Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.}]>:$accumulators,
12147    Res<TF_Float32Tensor, [{Parameter weights updated by the MDL Adagrad Light optimization algorithm.}]>:$weights,
12148    Res<TF_Float32Tensor, [{Parameter benefits updated by the MDL Adagrad Light optimization algorithm.}]>:$benefits
12149  );
12150}
12151
12152def TF_RetrieveTPUEmbeddingMomentumParametersOp : TF_Op<"RetrieveTPUEmbeddingMomentumParameters", [TF_TPUEmbeddingSideEffect]> {
12153  let summary = "Retrieve Momentum embedding parameters.";
12154
12155  let description = [{
12156An op that retrieves optimization parameters from embedding to host
12157memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12158the correct embedding table configuration. For example, this op is
12159used to retrieve updated parameters before saving a checkpoint.
12160  }];
12161
12162  let arguments = (ins
12163    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12164    StrAttr:$table_name,
12165    I64Attr:$num_shards,
12166    I64Attr:$shard_id,
12167    StrAttr:$config
12168  );
12169
12170  let results = (outs
12171    Res<TF_Float32Tensor, [{Parameter parameters updated by the Momentum optimization algorithm.}]>:$parameters,
12172    Res<TF_Float32Tensor, [{Parameter momenta updated by the Momentum optimization algorithm.}]>:$momenta
12173  );
12174}
12175
12176def TF_RetrieveTPUEmbeddingMomentumParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingMomentumParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12177  let summary = "Retrieve Momentum embedding parameters with debug support.";
12178
12179  let description = [{
12180An op that retrieves optimization parameters from embedding to host
12181memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12182the correct embedding table configuration. For example, this op is
12183used to retrieve updated parameters before saving a checkpoint.
12184  }];
12185
12186  let arguments = (ins
12187    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12188    StrAttr:$table_name,
12189    I64Attr:$num_shards,
12190    I64Attr:$shard_id,
12191    StrAttr:$config
12192  );
12193
12194  let results = (outs
12195    Res<TF_Float32Tensor, [{Parameter parameters updated by the Momentum optimization algorithm.}]>:$parameters,
12196    Res<TF_Float32Tensor, [{Parameter momenta updated by the Momentum optimization algorithm.}]>:$momenta,
12197    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the Momentum optimization algorithm.}]>:$gradient_accumulators
12198  );
12199}
12200
12201def TF_RetrieveTPUEmbeddingProximalAdagradParametersOp : TF_Op<"RetrieveTPUEmbeddingProximalAdagradParameters", [TF_TPUEmbeddingSideEffect]> {
12202  let summary = "Retrieve proximal Adagrad embedding parameters.";
12203
12204  let description = [{
12205An op that retrieves optimization parameters from embedding to host
12206memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12207the correct embedding table configuration. For example, this op is
12208used to retrieve updated parameters before saving a checkpoint.
12209  }];
12210
12211  let arguments = (ins
12212    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12213    StrAttr:$table_name,
12214    I64Attr:$num_shards,
12215    I64Attr:$shard_id,
12216    StrAttr:$config
12217  );
12218
12219  let results = (outs
12220    Res<TF_Float32Tensor, [{Parameter parameters updated by the proximal Adagrad optimization algorithm.}]>:$parameters,
12221    Res<TF_Float32Tensor, [{Parameter accumulators updated by the proximal Adagrad optimization algorithm.}]>:$accumulators
12222  );
12223}
12224
12225def TF_RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12226  let summary = [{
12227Retrieve proximal Adagrad embedding parameters with debug support.
12228  }];
12229
12230  let description = [{
12231An op that retrieves optimization parameters from embedding to host
12232memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12233the correct embedding table configuration. For example, this op is
12234used to retrieve updated parameters before saving a checkpoint.
12235  }];
12236
12237  let arguments = (ins
12238    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12239    StrAttr:$table_name,
12240    I64Attr:$num_shards,
12241    I64Attr:$shard_id,
12242    StrAttr:$config
12243  );
12244
12245  let results = (outs
12246    Res<TF_Float32Tensor, [{Parameter parameters updated by the proximal Adagrad optimization algorithm.}]>:$parameters,
12247    Res<TF_Float32Tensor, [{Parameter accumulators updated by the proximal Adagrad optimization algorithm.}]>:$accumulators,
12248    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the proximal Adagrad optimization algorithm.}]>:$gradient_accumulators
12249  );
12250}
12251
12252def TF_RetrieveTPUEmbeddingProximalYogiParametersOp : TF_Op<"RetrieveTPUEmbeddingProximalYogiParameters", [TF_TPUEmbeddingSideEffect]> {
12253  let summary = "";
12254
12255  let arguments = (ins
12256    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12257    StrAttr:$table_name,
12258    I64Attr:$num_shards,
12259    I64Attr:$shard_id,
12260    StrAttr:$config
12261  );
12262
12263  let results = (outs
12264    TF_Float32Tensor:$parameters,
12265    TF_Float32Tensor:$v,
12266    TF_Float32Tensor:$m
12267  );
12268}
12269
12270def TF_RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingProximalYogiParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12271  let summary = "";
12272
12273  let arguments = (ins
12274    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12275    StrAttr:$table_name,
12276    I64Attr:$num_shards,
12277    I64Attr:$shard_id,
12278    StrAttr:$config
12279  );
12280
12281  let results = (outs
12282    TF_Float32Tensor:$parameters,
12283    TF_Float32Tensor:$v,
12284    TF_Float32Tensor:$m,
12285    TF_Float32Tensor:$gradient_accumulators
12286  );
12287}
12288
12289def TF_RetrieveTPUEmbeddingRMSPropParametersOp : TF_Op<"RetrieveTPUEmbeddingRMSPropParameters", [TF_TPUEmbeddingSideEffect]> {
12290  let summary = "Retrieve RMSProp embedding parameters.";
12291
12292  let description = [{
12293An op that retrieves optimization parameters from embedding to host
12294memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12295the correct embedding table configuration. For example, this op is
12296used to retrieve updated parameters before saving a checkpoint.
12297  }];
12298
12299  let arguments = (ins
12300    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12301    StrAttr:$table_name,
12302    I64Attr:$num_shards,
12303    I64Attr:$shard_id,
12304    StrAttr:$config
12305  );
12306
12307  let results = (outs
12308    Res<TF_Float32Tensor, [{Parameter parameters updated by the RMSProp optimization algorithm.}]>:$parameters,
12309    Res<TF_Float32Tensor, [{Parameter ms updated by the RMSProp optimization algorithm.}]>:$ms,
12310    Res<TF_Float32Tensor, [{Parameter mom updated by the RMSProp optimization algorithm.}]>:$mom
12311  );
12312}
12313
12314def TF_RetrieveTPUEmbeddingRMSPropParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12315  let summary = "Retrieve RMSProp embedding parameters with debug support.";
12316
12317  let description = [{
12318An op that retrieves optimization parameters from embedding to host
12319memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12320the correct embedding table configuration. For example, this op is
12321used to retrieve updated parameters before saving a checkpoint.
12322  }];
12323
12324  let arguments = (ins
12325    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12326    StrAttr:$table_name,
12327    I64Attr:$num_shards,
12328    I64Attr:$shard_id,
12329    StrAttr:$config
12330  );
12331
12332  let results = (outs
12333    Res<TF_Float32Tensor, [{Parameter parameters updated by the RMSProp optimization algorithm.}]>:$parameters,
12334    Res<TF_Float32Tensor, [{Parameter ms updated by the RMSProp optimization algorithm.}]>:$ms,
12335    Res<TF_Float32Tensor, [{Parameter mom updated by the RMSProp optimization algorithm.}]>:$mom,
12336    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the RMSProp optimization algorithm.}]>:$gradient_accumulators
12337  );
12338}
12339
12340def TF_RetrieveTPUEmbeddingStochasticGradientDescentParametersOp : TF_Op<"RetrieveTPUEmbeddingStochasticGradientDescentParameters", [TF_TPUEmbeddingSideEffect]> {
12341  let summary = "Retrieve SGD embedding parameters.";
12342
12343  let description = [{
12344An op that retrieves optimization parameters from embedding to host
12345memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12346the correct embedding table configuration. For example, this op is
12347used to retrieve updated parameters before saving a checkpoint.
12348  }];
12349
12350  let arguments = (ins
12351    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12352    StrAttr:$table_name,
12353    I64Attr:$num_shards,
12354    I64Attr:$shard_id,
12355    StrAttr:$config
12356  );
12357
12358  let results = (outs
12359    Res<TF_Float32Tensor, [{Parameter parameters updated by the stochastic gradient descent optimization algorithm.}]>:$parameters
12360  );
12361}
12362
12363def TF_RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebugOp : TF_Op<"RetrieveTPUEmbeddingStochasticGradientDescentParametersGradAccumDebug", [TF_TPUEmbeddingSideEffect]> {
12364  let summary = "Retrieve SGD embedding parameters with debug support.";
12365
12366  let description = [{
12367An op that retrieves optimization parameters from embedding to host
12368memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
12369the correct embedding table configuration. For example, this op is
12370used to retrieve updated parameters before saving a checkpoint.
12371  }];
12372
12373  let arguments = (ins
12374    DefaultValuedAttr<I64Attr, "-1">:$table_id,
12375    StrAttr:$table_name,
12376    I64Attr:$num_shards,
12377    I64Attr:$shard_id,
12378    StrAttr:$config
12379  );
12380
12381  let results = (outs
12382    Res<TF_Float32Tensor, [{Parameter parameters updated by the stochastic gradient descent optimization algorithm.}]>:$parameters,
    Res<TF_Float32Tensor, [{Parameter gradient_accumulators updated by the stochastic gradient descent optimization algorithm.}]>:$gradient_accumulators
12384  );
12385}
12386
12387def TF_ReverseSequenceOp : TF_Op<"ReverseSequence", [NoSideEffect]> {
12388  let summary = "Reverses variable length slices.";
12389
12390  let description = [{
12391This op first slices `input` along the dimension `batch_dim`, and for each
12392slice `i`, reverses the first `seq_lengths[i]` elements along
12393the dimension `seq_dim`.
12394
12395The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`,
12396and `seq_lengths` must be a vector of length `input.dims[batch_dim]`.
12397
12398The output slice `i` along dimension `batch_dim` is then given by input
12399slice `i`, with the first `seq_lengths[i]` slices along dimension
12400`seq_dim` reversed.
12401
12402For example:
12403
12404```
12405# Given this:
12406batch_dim = 0
12407seq_dim = 1
12408input.dims = (4, 8, ...)
12409seq_lengths = [7, 2, 3, 5]
12410
12411# then slices of input are reversed on seq_dim, but only up to seq_lengths:
output[0, 0:7, :, ...] = input[0, 6::-1, :, ...]
output[1, 0:2, :, ...] = input[1, 1::-1, :, ...]
output[2, 0:3, :, ...] = input[2, 2::-1, :, ...]
output[3, 0:5, :, ...] = input[3, 4::-1, :, ...]
12416
# while entries past seq_lengths are copied through:
output[0, 7:, :, ...] = input[0, 7:, :, ...]
output[1, 2:, :, ...] = input[1, 2:, :, ...]
output[2, 3:, :, ...] = input[2, 3:, :, ...]
output[3, 5:, :, ...] = input[3, 5:, :, ...]
12422```
12423
12424In contrast, if:
12425
12426```
12427# Given this:
12428batch_dim = 2
12429seq_dim = 0
12430input.dims = (8, ?, 4, ...)
12431seq_lengths = [7, 2, 3, 5]
12432
12433# then slices of input are reversed on seq_dim, but only up to seq_lengths:
output[0:7, :, 0, :, ...] = input[6::-1, :, 0, :, ...]
output[0:2, :, 1, :, ...] = input[1::-1, :, 1, :, ...]
output[0:3, :, 2, :, ...] = input[2::-1, :, 2, :, ...]
output[0:5, :, 3, :, ...] = input[4::-1, :, 3, :, ...]
12438
# while entries past seq_lengths are copied through:
output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
output[5:, :, 3, :, ...] = input[5:, :, 3, :, ...]
12444```
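
A small runnable sketch of the first case via the `tf.reverse_sequence`
Python wrapper:

```python
import tensorflow as tf

x = tf.constant([[1, 2, 3, 4],
                 [5, 6, 7, 8]])
# Reverse the first seq_lengths[i] elements of row i; the rest copy through.
print(tf.reverse_sequence(x, seq_lengths=[3, 2], seq_axis=1, batch_axis=0))
# [[3 2 1 4]
#  [6 5 7 8]]
```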
12445  }];
12446
12447  let arguments = (ins
12448    Arg<TF_Tensor, [{The input to reverse.}]>:$input,
12449    Arg<TF_I32OrI64Tensor, [{1-D with length `input.dims(batch_dim)` and
12450`max(seq_lengths) <= input.dims(seq_dim)`}]>:$seq_lengths,
12451
12452    I64Attr:$seq_dim,
12453    DefaultValuedAttr<I64Attr, "0">:$batch_dim
12454  );
12455
12456  let results = (outs
12457    Res<TF_Tensor, [{The partially reversed input. It has the same shape as `input`.}]>:$output
12458  );
12459
12460  TF_DerivedOperandTypeAttr Tlen = TF_DerivedOperandTypeAttr<1>;
12461  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12462}
12463
12464def TF_ReverseV2Op : TF_Op<"ReverseV2", [NoSideEffect]> {
12465  let summary = "Reverses specific dimensions of a tensor.";
12466
12467  let description = [{
12468NOTE `tf.reverse` has now changed behavior in preparation for 1.0.
12469`tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0.
12470
Given a `tensor` and an `int32` tensor `axis` representing the set of
dimensions of `tensor` to reverse, this operation reverses each dimension
`i` for which there exists `j` such that `axis[j] == i`.

`tensor` can have up to 8 dimensions. `axis` may contain 0 or more entries.
If an index is specified more than once, an InvalidArgument error is raised.
12478
12479For example:
12480
12481```
12482# tensor 't' is [[[[ 0,  1,  2,  3],
12483#                  [ 4,  5,  6,  7],
12484#                  [ 8,  9, 10, 11]],
12485#                 [[12, 13, 14, 15],
12486#                  [16, 17, 18, 19],
12487#                  [20, 21, 22, 23]]]]
12488# tensor 't' shape is [1, 2, 3, 4]
12489
12490# 'dims' is [3] or 'dims' is [-1]
12491reverse(t, dims) ==> [[[[ 3,  2,  1,  0],
12492                        [ 7,  6,  5,  4],
                        [11, 10,  9,  8]],
12494                       [[15, 14, 13, 12],
12495                        [19, 18, 17, 16],
12496                        [23, 22, 21, 20]]]]
12497
12498# 'dims' is '[1]' (or 'dims' is '[-3]')
12499reverse(t, dims) ==> [[[[12, 13, 14, 15],
12500                        [16, 17, 18, 19],
                        [20, 21, 22, 23]],
12502                       [[ 0,  1,  2,  3],
12503                        [ 4,  5,  6,  7],
12504                        [ 8,  9, 10, 11]]]]
12505
12506# 'dims' is '[2]' (or 'dims' is '[-2]')
12507reverse(t, dims) ==> [[[[8, 9, 10, 11],
12508                        [4, 5, 6, 7],
                        [0, 1, 2, 3]],
12510                       [[20, 21, 22, 23],
12511                        [16, 17, 18, 19],
12512                        [12, 13, 14, 15]]]]
12513```
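
Equivalently, via the `tf.reverse` Python wrapper:

```python
import tensorflow as tf

t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
print(tf.reverse(t, axis=[1]).numpy())  # [[3 2 1]
                                        #  [6 5 4]]
```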
12514  }];
12515
12516  let arguments = (ins
12517    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint8]>, [{Up to 8-D.}]>:$tensor,
12518    Arg<TF_I32OrI64Tensor, [{1-D. The indices of the dimensions to reverse. Must be in the range
12519`[-rank(tensor), rank(tensor))`.}]>:$axis
12520  );
12521
12522  let results = (outs
12523    Res<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Str, TF_Uint16, TF_Uint8]>, [{The same shape as `tensor`.}]>:$output
12524  );
12525
12526  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12527  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
12528}
12529
12530def TF_RightShiftOp : TF_Op<"RightShift", [NoSideEffect, ResultsBroadcastableShape]>,
12531                      WithBroadcastableBinOpBuilder {
12532  let summary = "Elementwise computes the bitwise right-shift of `x` and `y`.";
12533
12534  let description = [{
12535Performs a logical shift for unsigned integer types, and an arithmetic shift
12536for signed integer types.
12537
If `y` is negative, or greater than or equal to the width of `x` in bits,
the result is implementation defined.
12540
12541Example:
12542
12543```python
12544import tensorflow as tf
12545from tensorflow.python.ops import bitwise_ops
12546import numpy as np
12547dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64]
12548
12549for dtype in dtype_list:
12550  lhs = tf.constant([-1, -5, -3, -14], dtype=dtype)
12551  rhs = tf.constant([5, 0, 7, 11], dtype=dtype)
12552
12553  right_shift_result = bitwise_ops.right_shift(lhs, rhs)
12554
12555  print(right_shift_result)
12556
12557# This will print:
12558# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8)
12559# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16)
12560# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32)
12561# tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64)
12562
12563lhs = np.array([-2, 64, 101, 32], dtype=np.int8)
12564rhs = np.array([-1, -5, -3, -14], dtype=np.int8)
12565bitwise_ops.right_shift(lhs, rhs)
12566# <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2,  64, 101,  32], dtype=int8)>
12567```
12568  }];
12569
12570  let arguments = (ins
12571    TF_IntTensor:$x,
12572    TF_IntTensor:$y
12573  );
12574
12575  let results = (outs
12576    TF_IntTensor:$z
12577  );
12578
12579  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12580}
12581
12582def TF_RintOp : TF_Op<"Rint", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
12583  let summary = "Returns element-wise integer closest to x.";
12584
12585  let description = [{
If the result is midway between two representable values,
the even representable value is chosen.
12588For example:
12589
12590```
12591rint(-1.5) ==> -2.0
12592rint(0.5000001) ==> 1.0
12593rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
12594```
12595  }];
12596
12597  let arguments = (ins
12598    TF_FloatTensor:$x
12599  );
12600
12601  let results = (outs
12602    TF_FloatTensor:$y
12603  );
12604
12605  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12606}
12607
12608def TF_RiscAddOp : TF_Op<"RiscAdd", [Commutative, NoSideEffect]> {
12609  let summary = "Returns x + y element-wise.";
12610
12611  let description = [{
*NOTE*: `RiscAdd` does not support broadcasting.
12613
12614Given two input tensors, the `tf.risc_add` operation computes the sum for every element in the tensor.
12615
12616Both input and output have a range `(-inf, inf)`.
12617  }];
12618
12619  let arguments = (ins
12620    TF_FloatTensor:$x,
12621    TF_FloatTensor:$y
12622  );
12623
12624  let results = (outs
12625    TF_FloatTensor:$z
12626  );
12627
12628  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12629}
12630
12631def TF_RiscDotOp : TF_Op<"RiscDot", [NoSideEffect]> {
12632  let summary = "";
12633
12634  let arguments = (ins
12635    TF_FloatTensor:$a,
12636    TF_FloatTensor:$b,
12637
12638    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
12639    DefaultValuedAttr<BoolAttr, "false">:$transpose_b
12640  );
12641
12642  let results = (outs
12643    TF_FloatTensor:$product
12644  );
12645
12646  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12647}
12648
12649def TF_RngReadAndSkipOp : TF_Op<"RngReadAndSkip", []> {
12650  let summary = "Advance the counter of a counter-based RNG.";
12651
12652  let description = [{
12653The state of the RNG after
12654`rng_read_and_skip(n)` will be the same as that after `uniform([n])`
12655(or any other distribution). The actual increment added to the
12656counter is an unspecified implementation choice.
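
A sketch of the user-visible contract via `tf.random.Generator` (whose `skip`
method advances the counter this way in recent TF versions):

```python
import tensorflow as tf

g1 = tf.random.Generator.from_seed(1)
g2 = tf.random.Generator.from_seed(1)
_ = g1.uniform(shape=[10])  # consumes 10 values from the stream
g2.skip(10)                 # advances the counter without sampling
# Both generators are now in the same state.
print(tf.reduce_all(g1.state == g2.state).numpy())  # True
```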
12657  }];
12658
12659  let arguments = (ins
12660    TF_ResourceTensor:$resource,
12661    TF_Int32Tensor:$alg,
12662    TF_Uint64Tensor:$delta
12663  );
12664
12665  let results = (outs
12666    TF_Int64Tensor:$value
12667  );
12668}
12669
12670def TF_RollOp : TF_Op<"Roll", [NoSideEffect]> {
12671  let summary = "Rolls the elements of a tensor along an axis.";
12672
12673  let description = [{
12674The elements are shifted positively (towards larger indices) by the offset of
12675`shift` along the dimension of `axis`. Negative `shift` values will shift
elements in the opposite direction. Elements that roll past the last position
12677will wrap around to the first and vice versa. Multiple shifts along multiple
12678axes may be specified.
12679
12680For example:
12681
12682```
12683# 't' is [0, 1, 2, 3, 4]
12684roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2]
12685
12686# shifting along multiple dimensions
12687# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
12688roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
12689
12690# shifting along the same axis multiple times
12691# 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
12692roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
12693```
12694  }];
12695
12696  let arguments = (ins
12697    TF_Tensor:$input,
12698    Arg<TF_I32OrI64Tensor, [{Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which
12699elements are shifted positively (towards larger indices) along the dimension
12700specified by `axis[i]`. Negative shifts will roll the elements in the opposite
12701direction.}]>:$shift,
    Arg<TF_I32OrI64Tensor, [{Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension in which the
shift `shift[i]` should occur. If the same axis is referenced more than once, the
12704total shift for that axis will be the sum of all the shifts that belong to that
12705axis.}]>:$axis
12706  );
12707
12708  let results = (outs
12709    Res<TF_Tensor, [{Has the same shape and size as the input. The elements are shifted
12710positively (towards larger indices) by the offsets of `shift` along the
12711dimensions of `axis`.}]>:$output
12712  );
12713
12714  TF_DerivedOperandTypeAttr Tshift = TF_DerivedOperandTypeAttr<1>;
12715  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12716  TF_DerivedOperandTypeAttr Taxis = TF_DerivedOperandTypeAttr<2>;
12717}
12718
12719def TF_RoundOp : TF_Op<"Round", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
12720  let summary = [{
12721Rounds the values of a tensor to the nearest integer, element-wise.
12722  }];
12723
12724  let description = [{
Rounds half to even.  Also known as banker's rounding. If you want to round
according to the current system rounding mode, use std::rint.
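
For example, note how exact halves round to the nearest even value:

```
x = tf.constant([0.5, 1.5, 2.5, -0.5])
tf.round(x) ==> [0.0, 2.0, 2.0, -0.0]
```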
12727  }];
12728
12729  let arguments = (ins
12730    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
12731  );
12732
12733  let results = (outs
12734    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
12735  );
12736
12737  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12738}
12739
12740def TF_RsqrtOp : TF_Op<"Rsqrt", [NoSideEffect, SameOperandsAndResultType]> {
12741  let summary = "Computes reciprocal of square root of x element-wise.";
12742
12743  let description = [{
12744I.e., \\(y = 1 / \sqrt{x}\\).
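
For example:

```
x = tf.constant([1.0, 4.0, 16.0])
tf.math.rsqrt(x) ==> [1.0, 0.5, 0.25]
```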
12745  }];
12746
12747  let arguments = (ins
12748    TF_FpOrComplexTensor:$x
12749  );
12750
12751  let results = (outs
12752    TF_FpOrComplexTensor:$y
12753  );
12754
12755  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12756}
12757
12758def TF_RsqrtGradOp : TF_Op<"RsqrtGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
12759  let summary = "Computes the gradient for the rsqrt of `x` wrt its input.";
12760
12761  let description = [{
12762Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
12763is the corresponding input gradient.
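
A worked example: for `x = 4.0`, `y = rsqrt(4.0) = 0.5`, so with `dy = 1.0`:

```
# grad = dy * -0.5 * y^3
#      = 1.0 * -0.5 * 0.125 = -0.0625
```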
12764  }];
12765
12766  let arguments = (ins
12767    TF_FpOrComplexTensor:$y,
12768    TF_FpOrComplexTensor:$dy
12769  );
12770
12771  let results = (outs
12772    TF_FpOrComplexTensor:$z
12773  );
12774
12775  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
12776}
12777
12778def TF_SaveOp : TF_Op<"Save", []> {
12779  let summary = "Saves the input tensors to disk.";
12780
12781  let description = [{
12782The size of `tensor_names` must match the number of tensors in `data`. `data[i]`
12783is written to `filename` with name `tensor_names[i]`.
12784
12785See also `SaveSlices`.
12786  }];
12787
12788  let arguments = (ins
12789    Arg<TF_StrTensor, [{Must have a single element. The name of the file to which we write
12790the tensor.}]>:$filename,
12791    Arg<TF_StrTensor, [{Shape `[N]`. The names of the tensors to be saved.}]>:$tensor_names,
12792    Arg<Variadic<TF_Tensor>, [{`N` tensors to save.}]>:$data
12793  );
12794
12795  let results = (outs);
12796
12797  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<2>;
12798}
12799
12800def TF_SaveSlicesOp : TF_Op<"SaveSlices", []> {
12801  let summary = "Saves input tensors slices to disk.";
12802
12803  let description = [{
12804This is like `Save` except that tensors can be listed in the saved file as being
12805a slice of a larger tensor.  `shapes_and_slices` specifies the shape of the
12806larger tensor and the slice that this tensor covers. `shapes_and_slices` must
12807have as many elements as `tensor_names`.
12808
12809Elements of the `shapes_and_slices` input must either be:
12810
12811*  The empty string, in which case the corresponding tensor is
12812   saved normally.
12813*  A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the
12814   `dimI` are the dimensions of the larger tensor and `slice-spec`
12815   specifies what part is covered by the tensor to save.
12816
12817`slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1`
12818where each `sliceI` is either:
12819
12820*  The string `-` meaning that the slice covers all indices of this dimension
12821*  `start,length` where `start` and `length` are integers.  In that
12822   case the slice covers `length` indices starting at `start`.
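
For example, a `shapes_and_slices` entry of `"4 0,2"` records the saved tensor
as the first two rows of a larger 1-D tensor of shape `[4]`:

```
# larger tensor shape: [4]
# slice-spec "0,2": covers length 2 starting at index 0
shapes_and_slices = ["4 0,2"]
```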
12823
12824See also `Save`.
12825  }];
12826
12827  let arguments = (ins
12828    Arg<TF_StrTensor, [{Must have a single element. The name of the file to which we write the
12829tensor.}]>:$filename,
12830    Arg<TF_StrTensor, [{Shape `[N]`. The names of the tensors to be saved.}]>:$tensor_names,
12831    Arg<TF_StrTensor, [{Shape `[N]`.  The shapes and slice specifications to use when
12832saving the tensors.}]>:$shapes_and_slices,
12833    Arg<Variadic<TF_Tensor>, [{`N` tensors to save.}]>:$data
12834  );
12835
12836  let results = (outs);
12837
12838  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<3>;
12839}
12840
12841def TF_SaveV2Op : TF_Op<"SaveV2", []> {
12842  let summary = "Saves tensors in V2 checkpoint format.";
12843
12844  let description = [{
12845By default, saves the named tensors in full.  If the caller wishes to save
12846specific slices of full tensors, "shape_and_slices" should be non-empty strings
12847and correspondingly well-formed.
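
For example, a full (non-sliced) save might look like the following sketch
using the Python `tf.raw_ops` wrapper (the checkpoint prefix is illustrative):

```python
tf.raw_ops.SaveV2(
    prefix=tf.constant("/tmp/ckpt"),
    tensor_names=tf.constant(["w"]),
    shape_and_slices=tf.constant([""]),  # empty string: save the tensor in full
    tensors=[tf.constant([1.0, 2.0])])
```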
12848  }];
12849
12850  let arguments = (ins
12851    Arg<TF_StrTensor, [{Must have a single element. The prefix of the V2 checkpoint to which we
12852write the tensors.}]>:$prefix,
12853    Arg<TF_StrTensor, [{shape {N}. The names of the tensors to be saved.}]>:$tensor_names,
12854    Arg<TF_StrTensor, [{shape {N}.  The slice specs of the tensors to be saved.
12855Empty strings indicate that they are non-partitioned tensors.}]>:$shape_and_slices,
12856    Arg<Variadic<TF_Tensor>, [{`N` tensors to save.}]>:$tensors
12857  );
12858
12859  let results = (outs);
12860
12861  TF_DerivedOperandTypeListAttr dtypes = TF_DerivedOperandTypeListAttr<3>;
12862}
12863
12864def TF_ScatterNdOp : TF_Op<"ScatterNd", [NoSideEffect]> {
12865  let summary = "Scatter `updates` into a new tensor according to `indices`.";
12866
12867  let description = [{
12868Creates a new tensor by applying sparse `updates` to individual values or
12869slices within a tensor (initially zero for numeric, empty for string) of
12870the given `shape` according to indices.  This operator is the inverse of the
12871`tf.gather_nd` operator which extracts values or slices from a given tensor.
12872
12873This operation is similar to tensor_scatter_add, except that the tensor is
12874zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical
to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)`.
12876
12877If `indices` contains duplicates, then their updates are accumulated (summed).
12878
12879**WARNING**: The order in which updates are applied is nondeterministic, so the
12880output will be nondeterministic if `indices` contains duplicates -- because
12881of some numerical approximation issues, numbers summed in different order
12882may yield different results.
12883
12884`indices` is an integer tensor containing indices into a new tensor of shape
12885`shape`.  The last dimension of `indices` can be at most the rank of `shape`:
12886
12887    indices.shape[-1] <= shape.rank
12888
12889The last dimension of `indices` corresponds to indices into elements
12890(if `indices.shape[-1] = shape.rank`) or slices
12891(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
12892`shape`.  `updates` is a tensor with shape
12893
12894    indices.shape[:-1] + shape[indices.shape[-1]:]
12895
12896The simplest form of scatter is to insert individual elements in a tensor by
12897index. For example, say we want to insert 4 scattered elements in a rank-1
12898tensor with 8 elements.
12899
12900<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12901<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt>
12902</div>
12903
12904In Python, this scatter operation would look like this:
12905
12906```python
12907    indices = tf.constant([[4], [3], [1], [7]])
12908    updates = tf.constant([9, 10, 11, 12])
12909    shape = tf.constant([8])
12910    scatter = tf.scatter_nd(indices, updates, shape)
12911    print(scatter)
12912```
12913
12914The resulting tensor would look like this:
12915
12916    [0, 11, 0, 10, 9, 0, 0, 12]
12917
We can also insert entire slices of a higher rank tensor all at once. For
example, we can insert two slices into the first dimension of a rank-3 tensor
with two matrices of new values.
12921
12922<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12923<img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt>
12924</div>
12925
12926In Python, this scatter operation would look like this:
12927
12928```python
12929    indices = tf.constant([[0], [2]])
12930    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
12931                            [7, 7, 7, 7], [8, 8, 8, 8]],
12932                           [[5, 5, 5, 5], [6, 6, 6, 6],
12933                            [7, 7, 7, 7], [8, 8, 8, 8]]])
12934    shape = tf.constant([4, 4, 4])
12935    scatter = tf.scatter_nd(indices, updates, shape)
12936    print(scatter)
12937```
12938
12939The resulting tensor would look like this:
12940
12941    [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
12942     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
12943     [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
12944     [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
12945
Note that on CPU, if an out-of-bounds index is found, an error is returned.
On GPU, if an out-of-bounds index is found, the index is ignored.
12948  }];
12949
12950  let arguments = (ins
12951    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
12952    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates,
12953    Arg<TF_I32OrI64Tensor, [{1-D. The shape of the resulting tensor.}]>:$shape
12954  );
12955
12956  let results = (outs
12957    Res<TF_Tensor, [{A new tensor with the given shape and updates applied according
12958to the indices.}]>:$output
12959  );
12960
12961  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<0>;
12962  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
12963}
12964
12965def TF_SegmentMaxOp : TF_Op<"SegmentMax", [NoSideEffect]> {
12966  let summary = "Computes the maximum along segments of a tensor.";
12967
12968  let description = [{
12969Read
12970[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
12971for an explanation of segments.
12972
12973Computes a tensor such that
12974\\(output_i = \max_j(data_j)\\) where `max` is over `j` such
12975that `segment_ids[j] == i`.
12976
12977If the max is empty for a given segment ID `i`, `output[i] = 0`.
12978
12979<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
12980<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
12981</div>
12982
12983For example:
12984
12985```
12986c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
12987tf.segment_max(c, tf.constant([0, 0, 1]))
12988# ==> [[4, 3, 3, 4],
12989#      [5, 6, 7, 8]]
12990```
12991  }];
12992
12993  let arguments = (ins
12994    TF_IntOrFpTensor:$data,
12995    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
12996first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
12997  );
12998
12999  let results = (outs
13000    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
13001has size `k`, the number of segments.}]>:$output
13002  );
13003
13004  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
13005  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13006}
13007
13008def TF_SegmentMeanOp : TF_Op<"SegmentMean", [NoSideEffect]> {
13009  let summary = "Computes the mean along segments of a tensor.";
13010
13011  let description = [{
13012Read
13013[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
13014for an explanation of segments.
13015
13016Computes a tensor such that
\\(output_i = \frac{\sum_j data_j}{N}\\) where the sum is
over `j` such that `segment_ids[j] == i` and `N` is the total number of
values summed.
13020
13021If the mean is empty for a given segment ID `i`, `output[i] = 0`.
13022
13023<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
13024<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
13025</div>
13026
13027For example:
13028
13029```
13030c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
13031tf.segment_mean(c, tf.constant([0, 0, 1]))
13032# ==> [[2.5, 2.5, 2.5, 2.5],
13033#      [5, 6, 7, 8]]
13034```
13035  }];
13036
13037  let arguments = (ins
13038    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
13039    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
13040first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
13041  );
13042
13043  let results = (outs
13044    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
13045has size `k`, the number of segments.}]>:$output
13046  );
13047
13048  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
13049  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13050}
13051
13052def TF_SegmentMinOp : TF_Op<"SegmentMin", [NoSideEffect]> {
13053  let summary = "Computes the minimum along segments of a tensor.";
13054
13055  let description = [{
13056Read
13057[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
13058for an explanation of segments.
13059
13060Computes a tensor such that
13061\\(output_i = \min_j(data_j)\\) where `min` is over `j` such
13062that `segment_ids[j] == i`.
13063
13064If the min is empty for a given segment ID `i`, `output[i] = 0`.
13065
13066<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
13067<img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
13068</div>
13069
13070For example:
13071
13072```
13073c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
13074tf.segment_min(c, tf.constant([0, 0, 1]))
13075# ==> [[1, 2, 2, 1],
13076#      [5, 6, 7, 8]]
13077```
13078  }];
13079
13080  let arguments = (ins
13081    TF_IntOrFpTensor:$data,
13082    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
13083first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
13084  );
13085
13086  let results = (outs
13087    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
13088has size `k`, the number of segments.}]>:$output
13089  );
13090
13091  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
13092  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13093}
13094
13095def TF_SegmentProdOp : TF_Op<"SegmentProd", [NoSideEffect]> {
13096  let summary = "Computes the product along segments of a tensor.";
13097
13098  let description = [{
13099Read
13100[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
13101for an explanation of segments.
13102
13103Computes a tensor such that
13104\\(output_i = \prod_j data_j\\) where the product is over `j` such
13105that `segment_ids[j] == i`.
13106
13107If the product is empty for a given segment ID `i`, `output[i] = 1`.
13108
13109<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
13110<img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
13111</div>
13112
13113For example:
13114
13115```
13116c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
13117tf.segment_prod(c, tf.constant([0, 0, 1]))
13118# ==> [[4, 6, 6, 4],
13119#      [5, 6, 7, 8]]
13120```
13121  }];
13122
13123  let arguments = (ins
13124    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
13125    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
13126first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
13127  );
13128
13129  let results = (outs
13130    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
13131has size `k`, the number of segments.}]>:$output
13132  );
13133
13134  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
13135  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13136}
13137
13138def TF_SegmentSumOp : TF_Op<"SegmentSum", [NoSideEffect]> {
13139  let summary = "Computes the sum along segments of a tensor.";
13140
13141  let description = [{
13142Read
13143[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
13144for an explanation of segments.
13145
13146Computes a tensor such that
\\(output_i = \sum_j data_j\\) where the sum is over `j` such
13148that `segment_ids[j] == i`.
13149
13150If the sum is empty for a given segment ID `i`, `output[i] = 0`.
13151
13152<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
13153<img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
13154</div>
13155
13156For example:
13157
13158```
13159c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]])
13160tf.segment_sum(c, tf.constant([0, 0, 1]))
13161# ==> [[5, 5, 5, 5],
13162#      [5, 6, 7, 8]]
13163```
13164  }];
13165
13166  let arguments = (ins
13167    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
13168    Arg<TF_I32OrI64Tensor, [{A 1-D tensor whose size is equal to the size of `data`'s
13169first dimension.  Values should be sorted and can be repeated.}]>:$segment_ids
13170  );
13171
13172  let results = (outs
13173    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for dimension 0 which
13174has size `k`, the number of segments.}]>:$output
13175  );
13176
13177  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
13178  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13179}
13180
13181def TF_SelectOp : TF_Op<"Select", [NoSideEffect]> {
13182  let summary = "Selects elements from `x` or `y`, depending on `condition`.";
13183
13184  let description = [{
The `x` and `y` tensors must have the same shape, and the
13186output will also have that shape.
13187
13188The `condition` tensor must be a scalar if `x` and `y` are scalars.
13189If `x` and `y` are vectors or higher rank, then `condition` must be either a
13190scalar, a vector with size matching the first dimension of `x`, or must have
13191the same shape as `x`.
13192
13193The `condition` tensor acts as a mask that chooses, based on the value at each
13194element, whether the corresponding element / row in the output should be
13195taken from `x` (if true) or `y` (if false).
13196
13197If `condition` is a vector and `x` and `y` are higher rank matrices, then
13198it chooses which row (outer dimension) to copy from `x` and `y`.
13199If `condition` has the same shape as `x` and `y`, then it chooses which
13200element to copy from `x` and `y`.
13201
13202For example:
13203
13204```python
13205# 'condition' tensor is [[True,  False]
13206#                        [False, True]]
13207# 't' is [[1, 2],
13208#         [3, 4]]
13209# 'e' is [[5, 6],
13210#         [7, 8]]
13211select(condition, t, e)  # => [[1, 6], [7, 4]]
13212
13213
13214# 'condition' tensor is [True, False]
13215# 't' is [[1, 2],
13216#         [3, 4]]
13217# 'e' is [[5, 6],
13218#         [7, 8]]
13219select(condition, t, e) ==> [[1, 2],
13220                             [7, 8]]
13221
13222```
13223  }];
13224
13225  let arguments = (ins
13226    TF_BoolTensor:$condition,
    Arg<TF_Tensor, [{A `Tensor` which may have the same shape as `condition`.
13228If `condition` is rank 1, `x` may have higher rank,
13229but its first dimension must match the size of `condition`.}]>:$t,
    Arg<TF_Tensor, [{A `Tensor` with the same type and shape as `x`.}]>:$e
13231  );
13232
13233  let results = (outs
    Res<TF_Tensor, [{A `Tensor` with the same type and shape as `x` and `y`.}]>:$output
13235  );
13236
13237  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
13238
13239  let hasCanonicalizer = 1;
13240
13241  let verifier = [{
13242    return Verify(*this);
13243  }];
13244}
13245
13246def TF_SelectV2Op : TF_Op<"SelectV2", [NoSideEffect, ResultsBroadcastableShape]> {
13247  let summary = "";
13248
13249  let arguments = (ins
13250    TF_BoolTensor:$condition,
13251    TF_Tensor:$t,
13252    TF_Tensor:$e
13253  );
13254
13255  let results = (outs
13256    TF_Tensor:$output
13257  );
13258
13259  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
13260
13261  let builders = [
13262    OpBuilderDAG<(ins "Value":$condition, "Value":$e, "Value":$t)>
13263  ];
13264}
13265
13266def TF_SelfAdjointEigV2Op : TF_Op<"SelfAdjointEigV2", [NoSideEffect]> {
13267  let summary = [{
13268Computes the eigen decomposition of one or more square self-adjoint matrices.
13269  }];
13270
13271  let description = [{
13272Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
13273`input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
13274are sorted in non-decreasing order.
13275
13276```python
13277# a is a tensor.
13278# e is a tensor of eigenvalues.
13279# v is a tensor of eigenvectors.
13280e, v = self_adjoint_eig(a)
13281e = self_adjoint_eig(a, compute_v=False)
13282```
13283  }];
13284
13285  let arguments = (ins
13286    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{`Tensor` input of shape `[N, N]`.}]>:$input,
13287
13288    DefaultValuedAttr<BoolAttr, "true">:$compute_v
13289  );
13290
13291  let results = (outs
13292    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Eigenvalues. Shape is `[N]`.}]>:$e,
13293    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Eigenvectors. Shape is `[N, N]`.}]>:$v
13294  );
13295
13296  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13297}
13298
13299def TF_SeluOp : TF_Op<"Selu", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13300  let summary = [{
13301Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)`
13302  }];
13303
13304  let description = [{
if `features < 0`, and `scale * features` otherwise.
13306
13307To be used together with
13308`initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`.
13309For correct dropout, use `tf.contrib.nn.alpha_dropout`.
13310
13311See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
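
For example (values shown are approximate; `scale ≈ 1.0507`, `alpha ≈ 1.6733`):

```
x = tf.constant([-1.0, 0.0, 1.0])
tf.nn.selu(x) ==> [-1.1113, 0.0, 1.0507]
```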
13312  }];
13313
13314  let arguments = (ins
13315    TF_FloatTensor:$features
13316  );
13317
13318  let results = (outs
13319    TF_FloatTensor:$activations
13320  );
13321
13322  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13323}
13324
13325def TF_SeluGradOp : TF_Op<"SeluGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13326  let summary = [{
13327Computes gradients for the scaled exponential linear (Selu) operation.
13328  }];
13329
13330  let arguments = (ins
13331    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding Selu operation.}]>:$gradients,
13332    Arg<TF_FloatTensor, [{The outputs of the corresponding Selu operation.}]>:$outputs
13333  );
13334
13335  let results = (outs
13336    Res<TF_FloatTensor, [{The gradients: `gradients * (outputs + scale * alpha)`
13337if outputs < 0, `scale * gradients` otherwise.}]>:$backprops
13338  );
13339
13340  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13341}
13342
13343def TF_SendOp : TF_Op<"Send", []> {
13344  let summary = "Sends the named tensor from send_device to recv_device.";
13345
13346  let arguments = (ins
13347    Arg<TF_Tensor, [{The tensor to send.}]>:$tensor,
13348
13349    StrAttr:$tensor_name,
13350    StrAttr:$send_device,
13351    I64Attr:$send_device_incarnation,
13352    StrAttr:$recv_device,
13353    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
13354  );
13355
13356  let results = (outs);
13357
13358  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13359}
13360
13361def TF_SerializeIteratorOp : TF_Op<"SerializeIterator", []> {
13362  let summary = [{
13363Converts the given `resource_handle` representing an iterator to a variant tensor.
13364  }];
13365
13366  let arguments = (ins
13367    Arg<TF_ResourceTensor, [{A handle to an iterator resource.}], [TF_DatasetIteratorRead]>:$resource_handle,
13368
13369    DefaultValuedAttr<I64Attr, "0">:$external_state_policy
13370  );
13371
13372  let results = (outs
13373    Res<TF_VariantTensor, [{A variant tensor storing the state of the iterator contained in the
13374resource.}]>:$serialized
13375  );
13376}
13377
13378def TF_SerializeSparseOp : TF_Op<"SerializeSparse", [NoSideEffect]> {
13379  let summary = "Serialize a `SparseTensor` into a `[3]` `Tensor` object.";
13380
13381  let arguments = (ins
13382    Arg<TF_Int64Tensor, [{2-D.  The `indices` of the `SparseTensor`.}]>:$sparse_indices,
13383    Arg<TF_Tensor, [{1-D.  The `values` of the `SparseTensor`.}]>:$sparse_values,
13384    Arg<TF_Int64Tensor, [{1-D.  The `shape` of the `SparseTensor`.}]>:$sparse_shape
13385  );
13386
13387  let results = (outs
13388    TensorOf<[TF_Str, TF_Variant]>:$serialized_sparse
13389  );
13390
13391  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
13392  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
13393}
13394
13395def TF_ShapeOp : TF_Op<"Shape", [NoSideEffect]> {
13396  let summary = "Returns the shape of a tensor.";
13397
13398  let description = [{
13399This operation returns a 1-D integer tensor representing the shape of `input`.
13400
13401For example:
13402
13403```
13404# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
13405shape(t) ==> [2, 2, 3]
13406```
13407  }];
13408
13409  let arguments = (ins
13410    TF_Tensor:$input
13411  );
13412
13413  let results = (outs
13414    TF_I32OrI64Tensor:$output
13415  );
13416
13417  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13418  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
13419
13420  let verifier = [{
13421    return Verify(*this);
13422  }];
13423
13424  let builders = [
13425    OpBuilderDAG<(ins "Value":$input, "BoolAttr":$use32Bit)>
13426  ];
13427
13428  let hasFolder = 1;
13429}
13430
13431def TF_ShapeNOp : TF_Op<"ShapeN", [NoSideEffect]> {
13432  let summary = "Returns shape of tensors.";
13433
13434  let description = [{
This operation returns N 1-D integer tensors representing the shape of each `input[i]`.
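
For example:

```
# 'a' is [[1, 2], [3, 4]]; 'b' is [1, 2, 3]
shape_n([a, b]) ==> [[2, 2], [3]]
```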
13436  }];
13437
13438  let arguments = (ins
13439    Variadic<TF_Tensor>:$input
13440  );
13441
13442  let results = (outs
13443    Variadic<TF_I32OrI64Tensor>:$output
13444  );
13445
13446  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13447  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
13448  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
13449
13450  let verifier = [{
13451    return Verify(*this);
13452  }];
13453
13454  let hasCanonicalizer = 1;
13455}
13456
13457def TF_ShardedFilenameOp : TF_Op<"ShardedFilename", [NoSideEffect]> {
13458  let summary = [{
13459Generate a sharded filename. The filename is printf formatted as
13460  }];
13461
13462  let description = [{
`%s-%05d-of-%05d`, filled in with `basename`, `shard`, and `num_shards`.
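
For example:

```
# basename = "ckpt", shard = 3, num_shards = 8
sharded_filename ==> "ckpt-00003-of-00008"
```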
13464  }];
13465
13466  let arguments = (ins
13467    TF_StrTensor:$basename,
13468    TF_Int32Tensor:$shard,
13469    TF_Int32Tensor:$num_shards
13470  );
13471
13472  let results = (outs
13473    TF_StrTensor:$filename
13474  );
13475}
13476
13477def TF_ShuffleAndRepeatDatasetV2Op : TF_Op<"ShuffleAndRepeatDatasetV2", []> {
13478  let summary = "";
13479
13480  let arguments = (ins
13481    TF_VariantTensor:$input_dataset,
13482    TF_Int64Tensor:$buffer_size,
13483    TF_Int64Tensor:$seed,
13484    TF_Int64Tensor:$seed2,
13485    TF_Int64Tensor:$count,
13486    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorRead, TF_DatasetSeedGeneratorWrite]>:$seed_generator,
13487
13488    DefaultValuedAttr<BoolAttr, "true">:$reshuffle_each_iteration,
13489    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
13490    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
13491  );
13492
13493  let results = (outs
13494    TF_VariantTensor:$handle
13495  );
13496}
13497
13498def TF_ShuffleDatasetV2Op : TF_Op<"ShuffleDatasetV2", []> {
13499  let summary = "";
13500
13501  let arguments = (ins
13502    TF_VariantTensor:$input_dataset,
13503    TF_Int64Tensor:$buffer_size,
13504    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorRead, TF_DatasetSeedGeneratorWrite]>:$seed_generator,
13505
13506    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
13507    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
13508  );
13509
13510  let results = (outs
13511    TF_VariantTensor:$handle
13512  );
13513}
13514
13515def TF_ShuffleDatasetV3Op : TF_Op<"ShuffleDatasetV3", []> {
13516  let summary = "";
13517
13518  let arguments = (ins
13519    TF_VariantTensor:$input_dataset,
13520    TF_Int64Tensor:$buffer_size,
13521    TF_Int64Tensor:$seed,
13522    TF_Int64Tensor:$seed2,
13523    Arg<TF_ResourceTensor, "", [TF_DatasetSeedGeneratorRead, TF_DatasetSeedGeneratorWrite]>:$seed_generator,
13524
13525    DefaultValuedAttr<BoolAttr, "true">:$reshuffle_each_iteration,
13526    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
13527    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
13528  );
13529
13530  let results = (outs
13531    TF_VariantTensor:$handle
13532  );
13533}
13534
13535def TF_ShutdownDistributedTPUOp : TF_Op<"ShutdownDistributedTPU", []> {
13536  let summary = "Shuts down a running distributed TPU system.";
13537
13538  let description = [{
13539The op returns an error if no system is running.
13540  }];
13541
13542  let arguments = (ins);
13543
13544  let results = (outs);
13545}
13546
13547def TF_SigmoidOp : TF_Op<"Sigmoid", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13548  let summary = "Computes sigmoid of `x` element-wise.";
13549
13550  let description = [{
13551Specifically, `y = 1 / (1 + exp(-x))`.
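
For example (approximate values):

```
x = tf.constant([-1.0, 0.0, 1.0])
tf.math.sigmoid(x) ==> [0.2689, 0.5, 0.7311]
```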
13552  }];
13553
13554  let arguments = (ins
13555    TF_FpOrComplexTensor:$x
13556  );
13557
13558  let results = (outs
13559    TF_FpOrComplexTensor:$y
13560  );
13561
13562  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13563}
13564
13565def TF_SigmoidGradOp : TF_Op<"SigmoidGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13566  let summary = "Computes the gradient of the sigmoid of `x` wrt its input.";
13567
13568  let description = [{
13569Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
13570`dy` is the corresponding input gradient.
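
A worked example: for `x = 0.0`, `y = sigmoid(0.0) = 0.5`, so with `dy = 1.0`:

```
# grad = dy * y * (1 - y)
#      = 1.0 * 0.5 * 0.5 = 0.25
```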
13571  }];
13572
13573  let arguments = (ins
13574    TF_FpOrComplexTensor:$y,
13575    TF_FpOrComplexTensor:$dy
13576  );
13577
13578  let results = (outs
13579    TF_FpOrComplexTensor:$z
13580  );
13581
13582  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13583}
13584
13585def TF_SignOp : TF_Op<"Sign", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
13586  let summary = "Returns an element-wise indication of the sign of a number.";
13587
13588  let description = [{
13589`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
13590
13591For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
13592
13593Example usage:
13594>>> tf.math.sign([0., 2., -3.])
13595<tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 0.,  1., -1.], dtype=float32)>
13596  }];
13597
13598  let arguments = (ins
13599    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$x
13600  );
13601
13602  let results = (outs
13603    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$y
13604  );
13605
13606  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13607}
13608
13609def TF_SinOp : TF_Op<"Sin", [NoSideEffect, SameOperandsAndResultType]> {
13610  let summary = "Computes sine of x element-wise.";
13611
13612  let description = [{
13613Given an input tensor, this function computes sine of every
13614  element in the tensor. Input range is `(-inf, inf)` and
13615  output range is `[-1,1]`.
13616
13617  ```python
13618  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")])
13619  tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 nan]
13620  ```
13621  }];
13622
13623  let arguments = (ins
13624    TF_FpOrComplexTensor:$x
13625  );
13626
13627  let results = (outs
13628    TF_FpOrComplexTensor:$y
13629  );
13630
13631  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13632}
13633
13634def TF_SinhOp : TF_Op<"Sinh", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13635  let summary = "Computes hyperbolic sine of x element-wise.";
13636
13637  let description = [{
13638Given an input tensor, this function computes hyperbolic sine of every
13639  element in the tensor. Input range is `[-inf,inf]` and output range
13640  is `[-inf,inf]`.
13641
13642  ```python
13643  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")])
13644  tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 3.6268604e+00 1.1013232e+04 inf]
13645  ```
13646  }];
13647
13648  let arguments = (ins
13649    TF_FpOrComplexTensor:$x
13650  );
13651
13652  let results = (outs
13653    TF_FpOrComplexTensor:$y
13654  );
13655
13656  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13657}
13658
13659def TF_SizeOp : TF_Op<"Size", [NoSideEffect]> {
13660  let summary = "Returns the size of a tensor.";
13661
13662  let description = [{
13663This operation returns an integer representing the number of elements in
13664`input`.
13665
13666For example:
13667
13668```
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
13670size(t) ==> 12
13671```
13672  }];
13673
13674  let arguments = (ins
13675    TF_Tensor:$input
13676  );
13677
13678  let results = (outs
13679    TF_I32OrI64Tensor:$output
13680  );
13681
13682  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13683  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
13684
13685  let verifier = [{
13686    return Verify(*this);
13687  }];
13688
13689  let hasFolder = 1;
13690}
13691
13692def TF_SliceOp : TF_Op<"Slice", [NoSideEffect]> {
13693  let summary = "Return a slice from 'input'.";
13694
13695  let description = [{
13696The output tensor is a tensor with dimensions described by 'size'
13697whose values are extracted from 'input' starting at the offsets in
13698'begin'.
13699
13700*Requirements*:
13701  0 <= begin[i] <= begin[i] + size[i] <= Di  for i in [0, n)
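
For example:

```
# 'input' is [[1, 2, 3], [4, 5, 6]]
slice(input, begin=[0, 1], size=[1, 2]) ==> [[2, 3]]
```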
13702  }];
13703
13704  let arguments = (ins
13705    TF_Tensor:$input,
13706    Arg<TF_I32OrI64Tensor, [{begin[i] specifies the offset into the 'i'th dimension of
13707'input' to slice from.}]>:$begin,
13708    Arg<TF_I32OrI64Tensor, [{size[i] specifies the number of elements of the 'i'th dimension
13709of 'input' to slice. If size[i] is -1, all remaining elements in dimension
13710i are included in the slice (i.e. this is equivalent to setting
13711size[i] = input.dim_size(i) - begin[i]).}]>:$size
13712  );
13713
13714  let results = (outs
13715    TF_Tensor:$output
13716  );
13717
13718  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13719  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
13720
13721  let verifier = [{
13722    return Verify(*this);
13723  }];
13724}
13725
13726def TF_SnapshotOp : TF_Op<"Snapshot", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13727  let summary = "Returns a copy of the input tensor.";
13728
13729  let arguments = (ins
13730    TF_Tensor:$input
13731  );
13732
13733  let results = (outs
13734    TF_Tensor:$output
13735  );
13736
13737  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13738}
13739
13740def TF_SoftmaxOp : TF_Op<"Softmax", [NoSideEffect, SameOperandsAndResultType]> {
13741  let summary = "Computes softmax activations.";
13742
13743  let description = [{
13744For each batch `i` and class `j` we have
13745
13746    $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
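
For example (approximate values):

```
logits = tf.constant([[1.0, 1.0], [0.0, 1.0]])
tf.nn.softmax(logits) ==> [[0.5, 0.5], [0.2689, 0.7311]]
```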
13747  }];
13748
13749  let arguments = (ins
13750    Arg<TF_FloatTensor, [{2-D with shape `[batch_size, num_classes]`.}]>:$logits
13751  );
13752
13753  let results = (outs
13754    Res<TF_FloatTensor, [{Same shape as `logits`.}]>:$softmax
13755  );
13756
13757  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13758
13759  let verifier = [{
13760    return Verify(*this);
13761  }];
13762}
13763
13764def TF_SoftmaxCrossEntropyWithLogitsOp : TF_Op<"SoftmaxCrossEntropyWithLogits", [NoSideEffect]> {
13765  let summary = [{
13766Computes softmax cross entropy cost and gradients to backpropagate.
13767  }];
13768
13769  let description = [{
13770Inputs are the logits, not probabilities.
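
A worked example (`log` is the natural logarithm):

```
# features = [[0.0, 0.0]], labels = [[0.5, 0.5]]
# softmax(features) = [[0.5, 0.5]]
# loss = -sum(labels * log(softmax)) = log(2) ~= 0.6931
# backprop = softmax - labels = [[0.0, 0.0]]
```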
13771  }];
13772
13773  let arguments = (ins
13774    Arg<TF_FloatTensor, [{batch_size x num_classes matrix}]>:$features,
13775    Arg<TF_FloatTensor, [{batch_size x num_classes matrix
13776The caller must ensure that each batch of labels represents a valid
13777probability distribution.}]>:$labels
13778  );
13779
13780  let results = (outs
13781    Res<TF_FloatTensor, [{Per example loss (batch_size vector).}]>:$loss,
13782    Res<TF_FloatTensor, [{backpropagated gradients (batch_size x num_classes matrix).}]>:$backprop
13783  );
13784
13785  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13786
13787  let verifier = [{
13788    return Verify(*this);
13789  }];
13790}
13791
13792def TF_SoftplusOp : TF_Op<"Softplus", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13793  let summary = "Computes softplus: `log(exp(features) + 1)`.";
13794
13795  let arguments = (ins
13796    TF_FloatTensor:$features
13797  );
13798
13799  let results = (outs
13800    TF_FloatTensor:$activations
13801  );
13802
13803  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13804}
13805
13806def TF_SoftplusGradOp : TF_Op<"SoftplusGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13807  let summary = "Computes softplus gradients for a softplus operation.";
13808
13809  let arguments = (ins
13810    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding softplus operation.}]>:$gradients,
13811    Arg<TF_FloatTensor, [{The features passed as input to the corresponding softplus operation.}]>:$features
13812  );
13813
13814  let results = (outs
13815    Res<TF_FloatTensor, [{The gradients: `gradients / (1 + exp(-features))`.}]>:$backprops
13816  );
13817
13818  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13819}
13820
13821def TF_SoftsignOp : TF_Op<"Softsign", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13822  let summary = "Computes softsign: `features / (abs(features) + 1)`.";
13823
13824  let arguments = (ins
13825    TF_FloatTensor:$features
13826  );
13827
13828  let results = (outs
13829    TF_FloatTensor:$activations
13830  );
13831
13832  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13833}
13834
13835def TF_SoftsignGradOp : TF_Op<"SoftsignGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
13836  let summary = "Computes softsign gradients for a softsign operation.";
13837
13838  let arguments = (ins
13839    Arg<TF_FloatTensor, [{The backpropagated gradients to the corresponding softsign operation.}]>:$gradients,
13840    Arg<TF_FloatTensor, [{The features passed as input to the corresponding softsign operation.}]>:$features
13841  );
13842
13843  let results = (outs
13844    Res<TF_FloatTensor, [{The gradients: `gradients / (1 + abs(features)) ** 2`.}]>:$backprops
13845  );
13846
13847  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13848}
13849
13850def TF_SpaceToBatchOp : TF_Op<"SpaceToBatch", [NoSideEffect]> {
13851  let summary = "SpaceToBatch for 4-D tensors of type T.";
13852
13853  let description = [{
13854This is a legacy version of the more general SpaceToBatchND.
13855
13856Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
13857More specifically, this op outputs a copy of the input tensor where values from
13858the `height` and `width` dimensions are moved to the `batch` dimension. After
13859the zero-padding, both `height` and `width` of the input must be divisible by the
13860block size.
13861  }];
13862
13863  let arguments = (ins
13864    Arg<TF_Tensor, [{4-D with shape `[batch, height, width, depth]`.}]>:$input,
13865    Arg<TF_I32OrI64Tensor, [{2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
13866  the padding of the input with zeros across the spatial dimensions as follows:
13867
13868      paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
13869
13870  The effective spatial dimensions of the zero-padded input tensor will be:
13871
13872      height_pad = pad_top + height + pad_bottom
13873      width_pad = pad_left + width + pad_right
13874
13875The attr `block_size` must be greater than one. It indicates the block size.
13876
  * Non-overlapping blocks of size `block_size x block_size` in the height and
13878    width dimensions are rearranged into the batch dimension at each location.
13879  * The batch of the output tensor is `batch * block_size * block_size`.
13880  * Both height_pad and width_pad must be divisible by block_size.
13881
13882The shape of the output will be:
13883
13884    [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
13885     depth]
13886
13887Some examples:
13888
13889(1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2:
13890
13891```
13892x = [[[[1], [2]], [[3], [4]]]]
13893```
13894
13895The output tensor has shape `[4, 1, 1, 1]` and value:
13896
13897```
13898[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
13899```
13900
13901(2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2:
13902
13903```
13904x = [[[[1, 2, 3], [4, 5, 6]],
13905      [[7, 8, 9], [10, 11, 12]]]]
13906```
13907
13908The output tensor has shape `[4, 1, 1, 3]` and value:
13909
13910```
13911[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
13912```
13913
13914(3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
13915
13916```
13917x = [[[[1],   [2],  [3],  [4]],
13918      [[5],   [6],  [7],  [8]],
13919      [[9],  [10], [11],  [12]],
13920      [[13], [14], [15],  [16]]]]
13921```
13922
13923The output tensor has shape `[4, 2, 2, 1]` and value:
13924
13925```
13926x = [[[[1], [3]], [[9], [11]]],
13927     [[[2], [4]], [[10], [12]]],
13928     [[[5], [7]], [[13], [15]]],
13929     [[[6], [8]], [[14], [16]]]]
13930```
13931
13932(4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2:
13933
13934```
13935x = [[[[1],   [2],  [3],  [4]],
13936      [[5],   [6],  [7],  [8]]],
13937     [[[9],  [10], [11],  [12]],
13938      [[13], [14], [15],  [16]]]]
13939```
13940
13941The output tensor has shape `[8, 1, 2, 1]` and value:
13942
13943```
13944x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
13945     [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
13946```
13947
13948Among others, this operation is useful for reducing atrous convolution into
13949regular convolution.}]>:$paddings,
13950
13951    Confined<I64Attr, [IntMinValue<2>]>:$block_size
13952  );
13953
13954  let results = (outs
13955    TF_Tensor:$output
13956  );
13957
13958  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
13959  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<1>;
13960}
13961
13962def TF_SpaceToBatchNDOp : TF_Op<"SpaceToBatchND", [DeclareOpInterfaceMethods<InferTypeOpInterface>, NoSideEffect]> {
13963  let summary = "SpaceToBatch for N-D tensors of type T.";
13964
13965  let description = [{
13966This operation divides "spatial" dimensions `[1, ..., M]` of the input into a
13967grid of blocks of shape `block_shape`, and interleaves these blocks with the
13968"batch" dimension (0) such that in the output, the spatial dimensions
13969`[1, ..., M]` correspond to the position within the grid, and the batch
13970dimension combines both the position within a spatial block and the original
13971batch position.  Prior to division into blocks, the spatial dimensions of the
13972input are optionally zero padded according to `paddings`.  See below for a
13973precise description.
13974  }];
13975
13976  let arguments = (ins
13977    Arg<TF_Tensor, [{N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
13978where spatial_shape has `M` dimensions.}]>:$input,
13979    Arg<TF_I32OrI64Tensor, [{1-D with shape `[M]`, all values must be >= 1.}]>:$block_shape,
13980    Arg<TF_I32OrI64Tensor, [{2-D with shape `[M, 2]`, all values must be >= 0.
13981  `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension
13982  `i + 1`, which corresponds to spatial dimension `i`.  It is required that
13983  `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`.
13984
13985This operation is equivalent to the following steps:
13986
139871. Zero-pad the start and end of dimensions `[1, ..., M]` of the
13988   input according to `paddings` to produce `padded` of shape `padded_shape`.
13989
139902. Reshape `padded` to `reshaped_padded` of shape:
13991
13992     [batch] +
13993     [padded_shape[1] / block_shape[0],
13994       block_shape[0],
13995      ...,
13996      padded_shape[M] / block_shape[M-1],
13997      block_shape[M-1]] +
13998     remaining_shape
13999
140003. Permute dimensions of `reshaped_padded` to produce
14001   `permuted_reshaped_padded` of shape:
14002
14003     block_shape +
14004     [batch] +
14005     [padded_shape[1] / block_shape[0],
14006      ...,
14007      padded_shape[M] / block_shape[M-1]] +
14008     remaining_shape
14009
140104. Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch
14011   dimension, producing an output tensor of shape:
14012
14013     [batch * prod(block_shape)] +
14014     [padded_shape[1] / block_shape[0],
14015      ...,
14016      padded_shape[M] / block_shape[M-1]] +
14017     remaining_shape
14018
14019Some examples:
14020
14021(1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and
14022    `paddings = [[0, 0], [0, 0]]`:
14023
14024```
14025x = [[[[1], [2]], [[3], [4]]]]
14026```
14027
14028The output tensor has shape `[4, 1, 1, 1]` and value:
14029
14030```
14031[[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
14032```
14033
14034(2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and
14035    `paddings = [[0, 0], [0, 0]]`:
14036
14037```
14038x = [[[[1, 2, 3], [4, 5, 6]],
14039      [[7, 8, 9], [10, 11, 12]]]]
14040```
14041
14042The output tensor has shape `[4, 1, 1, 3]` and value:
14043
14044```
14045[[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
14046```
14047
14048(3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
14049    `paddings = [[0, 0], [0, 0]]`:
14050
14051```
14052x = [[[[1],   [2],  [3],  [4]],
14053      [[5],   [6],  [7],  [8]],
14054      [[9],  [10], [11],  [12]],
14055      [[13], [14], [15],  [16]]]]
14056```
14057
14058The output tensor has shape `[4, 2, 2, 1]` and value:
14059
14060```
14061x = [[[[1], [3]], [[9], [11]]],
14062     [[[2], [4]], [[10], [12]]],
14063     [[[5], [7]], [[13], [15]]],
14064     [[[6], [8]], [[14], [16]]]]
14065```
14066
14067(4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and
14068    paddings = `[[0, 0], [2, 0]]`:
14069
14070```
14071x = [[[[1],   [2],  [3],  [4]],
14072      [[5],   [6],  [7],  [8]]],
14073     [[[9],  [10], [11],  [12]],
14074      [[13], [14], [15],  [16]]]]
14075```
14076
14077The output tensor has shape `[8, 1, 3, 1]` and value:
14078
14079```
14080x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
14081     [[[0], [2], [4]]], [[[0], [10], [12]]],
14082     [[[0], [5], [7]]], [[[0], [13], [15]]],
14083     [[[0], [6], [8]]], [[[0], [14], [16]]]]
14084```
14085
14086Among others, this operation is useful for reducing atrous convolution into
14087regular convolution.}]>:$paddings
14088  );
14089
14090  let results = (outs
14091    TF_Tensor:$output
14092  );
14093
14094  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14095  TF_DerivedOperandTypeAttr Tpaddings = TF_DerivedOperandTypeAttr<2>;
14096  TF_DerivedOperandTypeAttr Tblock_shape = TF_DerivedOperandTypeAttr<1>;
14097
14098  let verifier = [{ return Verify(*this); }];
14099
14100  let extraClassDeclaration = [{
14101    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
14102      return ArraysAreCastCompatible(l, r);
14103    }
14104  }];
14105}
14106
14107def TF_SpaceToDepthOp : TF_Op<"SpaceToDepth", [NoSideEffect]> {
14108  let summary = "SpaceToDepth for tensors of type T.";
14109
14110  let description = [{
Rearranges blocks of spatial data into depth. More specifically,
14112this op outputs a copy of the input tensor where values from the `height`
14113and `width` dimensions are moved to the `depth` dimension.
14114The attr `block_size` indicates the input block size.
14115
  * Non-overlapping blocks of size `block_size x block_size` are rearranged
14117    into depth at each location.
14118  * The depth of the output tensor is `block_size * block_size * input_depth`.
14119  * The Y, X coordinates within each block of the input become the high order
14120    component of the output channel index.
14121  * The input tensor's height and width must be divisible by block_size.
14122
14123The `data_format` attr specifies the layout of the input and output tensors
14124with the following options:
14125  "NHWC": `[ batch, height, width, channels ]`
14126  "NCHW": `[ batch, channels, height, width ]`
14127  "NCHW_VECT_C":
14128      `qint8 [ batch, channels / 4, height, width, 4 ]`
14129
14130It is useful to consider the operation as transforming a 6-D Tensor.
14131e.g. for data_format = NHWC,
14132     Each element in the input tensor can be specified via 6 coordinates,
14133     ordered by decreasing memory layout significance as:
14134     n,oY,bY,oX,bX,iC  (where n=batch index, oX, oY means X or Y coordinates
14135                        within the output image, bX, bY means coordinates
14136                        within the input block, iC means input channels).
14137     The output would be a transpose to the following layout:
14138     n,oY,oX,bY,bX,iC
14139
14140This operation is useful for resizing the activations between convolutions
14141(but keeping all data), e.g. instead of pooling. It is also useful for training
14142purely convolutional models.
14143
14144For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and
14145block_size = 2:
14146
14147```
14148x = [[[[1], [2]],
14149      [[3], [4]]]]
14150```
14151
14152This operation will output a tensor of shape `[1, 1, 1, 4]`:
14153
14154```
14155[[[[1, 2, 3, 4]]]]
14156```
14157
14158Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`,
14159the corresponding output will have a single element (i.e. width and height are
14160both 1) and will have a depth of 4 channels (1 * block_size * block_size).
14161The output element shape is `[1, 1, 4]`.
14162
14163For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g.
14164
14165```
14166x = [[[[1, 2, 3], [4, 5, 6]],
14167      [[7, 8, 9], [10, 11, 12]]]]
14168```
14169
14170This operation, for block_size of 2, will return the following tensor of shape
14171`[1, 1, 1, 12]`
14172
14173```
14174[[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
14175```
14176
14177Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2:
14178
14179```
14180x = [[[[1],   [2],  [5],  [6]],
14181      [[3],   [4],  [7],  [8]],
14182      [[9],  [10], [13],  [14]],
14183      [[11], [12], [15],  [16]]]]
14184```
14185
14186the operator will return the following tensor of shape `[1 2 2 4]`:
14187
14188```
14189x = [[[[1, 2, 3, 4],
14190       [5, 6, 7, 8]],
14191      [[9, 10, 11, 12],
14192       [13, 14, 15, 16]]]]
14193```
14194  }];
14195
14196  let arguments = (ins
14197    TF_Tensor:$input,
14198
14199    Confined<I64Attr, [IntMinValue<2>]>:$block_size,
14200    DefaultValuedAttr<TF_AnyStrAttrOf<["NHWC", "NCHW", "NCHW_VECT_C"]>, "NHWC">:$data_format
14201  );
14202
14203  let results = (outs
14204    TF_Tensor:$output
14205  );
14206
14207  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
14208}
14209
14210def TF_SparseFillEmptyRowsOp : TF_Op<"SparseFillEmptyRows", [NoSideEffect]> {
14211  let summary = [{
14212Fills empty rows in the input 2-D `SparseTensor` with a default value.
14213  }];
14214
14215  let description = [{
14216The input `SparseTensor` is represented via the tuple of inputs
14217(`indices`, `values`, `dense_shape`).  The output `SparseTensor` has the
14218same `dense_shape` but with indices `output_indices` and values
14219`output_values`.
14220
14221This op inserts a single entry for every row that doesn't have any values.
14222The index is created as `[row, 0, ..., 0]` and the inserted value
14223is `default_value`.
14224
14225For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:
14226
14227    [0, 1]: a
14228    [0, 3]: b
14229    [2, 0]: c
14230    [3, 1]: d
14231
14232Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:
14233
14234    [0, 1]: a
14235    [0, 3]: b
14236    [1, 0]: default_value
14237    [2, 0]: c
14238    [3, 1]: d
14239    [4, 0]: default_value
14240
14241The output `SparseTensor` will be in row-major order and will have the
14242same shape as the input.
14243
14244This op also returns an indicator vector shaped `[dense_shape[0]]` such that
14245
14246    empty_row_indicator[i] = True iff row i was an empty row.
14247
14248And a reverse index map vector shaped `[indices.shape[0]]` that is used during
14249backpropagation,
14250
14251    reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
14252  }];
14253
14254  let arguments = (ins
14255    Arg<TF_Int64Tensor, [{2-D. the indices of the sparse tensor.}]>:$indices,
14256    Arg<TF_Tensor, [{1-D. the values of the sparse tensor.}]>:$values,
14257    Arg<TF_Int64Tensor, [{1-D. the shape of the sparse tensor.}]>:$dense_shape,
    Arg<TF_Tensor, [{0-D. default value to insert into location `[row, 0, ..., 0]`
  for rows missing from the input sparse tensor.}]>:$default_value
14261  );
14262
14263  let results = (outs
    Res<TF_Int64Tensor, [{2-D. the indices of the filled sparse tensor.}]>:$output_indices,
14265    Res<TF_Tensor, [{1-D. the values of the filled sparse tensor.}]>:$output_values,
14266    Res<TF_BoolTensor, [{1-D. whether the dense row was missing in the
14267input sparse tensor.}]>:$empty_row_indicator,
14268    Res<TF_Int64Tensor, [{1-D. a map from the input indices to the output indices.}]>:$reverse_index_map
14269  );
14270
14271  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
14272}
14273
14274def TF_SparseMatMulOp : TF_Op<"SparseMatMul", [NoSideEffect]> {
14275  let summary = [{
14276Multiply matrix "a" by matrix "b".
14277  }];
14278
14279  let description = [{
14280The inputs must be two-dimensional matrices and the inner dimension of "a" must
14281match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not
14282`SparseTensor`s.  This op is optimized for the case where at least one of "a" or
14283"b" is sparse, in the sense that they have a large proportion of zero values.
14284The breakeven for using this versus a dense matrix multiply on one platform was
1428530% zero values in the sparse matrix.
14286
14287The gradient computation of this operation will only take advantage of sparsity
14288in the input gradient when that gradient comes from a Relu.
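
A minimal sketch via the raw-op binding (assuming TF 2.x; the `*_is_sparse`
flags are purely performance hints and do not change the result):

```python
import tensorflow as tf

a = tf.constant([[1.0, 0.0, 0.0, 0.0],
                 [0.0, 0.0, 2.0, 0.0]])  # dense Tensor, mostly zeros
b = tf.ones([4, 3], dtype=tf.float32)

# a_is_sparse=True steers the kernel to the sparsity-aware code path.
product = tf.raw_ops.SparseMatMul(a=a, b=b, a_is_sparse=True)
```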
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float32]>:$a,
    TensorOf<[TF_Bfloat16, TF_Float32]>:$b,

    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
    DefaultValuedAttr<BoolAttr, "false">:$transpose_b,
    DefaultValuedAttr<BoolAttr, "false">:$a_is_sparse,
    DefaultValuedAttr<BoolAttr, "false">:$b_is_sparse
  );

  let results = (outs
    TF_Float32Tensor:$product
  );

  TF_DerivedOperandTypeAttr Ta = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tb = TF_DerivedOperandTypeAttr<1>;
}

def TF_SparseReshapeOp : TF_Op<"SparseReshape", [NoSideEffect]> {
  let summary = [{
Reshapes a SparseTensor to represent values in a new dense shape.
  }];

  let description = [{
This operation has the same semantics as reshape on the represented dense
tensor.  The `input_indices` are recomputed based on the requested `new_shape`.

If one component of `new_shape` is the special value -1, the size of that
dimension is computed so that the total dense size remains constant.  At
most one component of `new_shape` can be -1.  The number of dense elements
implied by `new_shape` must be the same as the number of dense elements
originally implied by `input_shape`.

Reshaping does not affect the order of values in the SparseTensor.

If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
has length `R_out`, then `input_indices` has shape `[N, R_in]`,
`input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
`output_shape` has length `R_out`.
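
For illustration, a minimal sketch using the Python wrapper `tf.sparse.reshape`
(assuming TF 2.x eager execution):

```python
import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]],
                            values=[1, 2],
                            dense_shape=[2, 3])

# Total dense size is 6, so the -1 dimension is inferred as 2.
reshaped = tf.sparse.reshape(sp, [3, -1])
# reshaped.dense_shape => [3, 2]
```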
  }];

  let arguments = (ins
    Arg<TF_Int64Tensor, [{2-D.  `N x R_in` matrix with the indices of non-empty values in a
SparseTensor.}]>:$input_indices,
    Arg<TF_Int64Tensor, [{1-D.  `R_in` vector with the input SparseTensor's dense shape.}]>:$input_shape,
    Arg<TF_Int64Tensor, [{1-D.  `R_out` vector with the requested new dense shape.}]>:$new_shape
  );

  let results = (outs
    Res<TF_Int64Tensor, [{2-D.  `N x R_out` matrix with the updated indices of non-empty
values in the output SparseTensor.}]>:$output_indices,
    Res<TF_Int64Tensor, [{1-D.  `R_out` vector with the full dense shape of the output
SparseTensor.  This is the same as `new_shape` but with any -1 dimensions
filled in.}]>:$output_shape
  );
}

def TF_SparseSegmentMeanOp : TF_Op<"SparseSegmentMean", [NoSideEffect]> {
  let summary = "Computes the mean along sparse segments of a tensor.";

  let description = [{
See `tf.sparse.segment_sum` for usage examples.

Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.
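
For illustration, a minimal sketch using the Python wrapper
`tf.sparse.segment_mean` (assuming TF 2.x eager execution):

```python
import tensorflow as tf

c = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0],
                 [7.0, 8.0, 9.0]])

# Average rows 0 and 2 into a single segment.
tf.sparse.segment_mean(c, tf.constant([0, 2]), tf.constant([0, 0]))
# => [[4.0, 5.0, 6.0]]
```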
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
}

def TF_SparseSegmentSqrtNOp : TF_Op<"SparseSegmentSqrtN", [NoSideEffect]> {
  let summary = [{
Computes the sum along sparse segments of a tensor divided by the sqrt of N.
  }];

  let description = [{
N is the size of the segment being reduced.

See `tf.sparse.segment_sum` for usage examples.
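
For illustration, a minimal sketch using the Python wrapper
`tf.sparse.segment_sqrt_n` (assuming TF 2.x eager execution):

```python
import tensorflow as tf

c = tf.constant([[1.0, 2.0], [3.0, 4.0]])

# Sum rows 0 and 1 into one segment, then divide by sqrt(N) with N = 2.
tf.sparse.segment_sqrt_n(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[2.828..., 4.242...]], i.e. [4.0, 6.0] / sqrt(2)
```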
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Float32, TF_Float64]>, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
}

def TF_SparseSegmentSumOp : TF_Op<"SparseSegmentSum", [NoSideEffect]> {
  let summary = "Computes the sum along sparse segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
dimension, selecting a subset of dimension 0, specified by `indices`.

For example:

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

# Select two rows, one segment.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]

# Select two rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1  2  3  4]
#     [-1 -2 -3 -4]]

# Select all rows, two segments.
tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
#     [5 6 7 8]]

# Which is equivalent to:
tf.segment_sum(c, tf.constant([0, 0, 1]))
```
  }];

  let arguments = (ins
    TF_IntOrFpTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Has same rank as `segment_ids`.}]>:$indices,
    Arg<TF_I32OrI64Tensor, [{A 1-D tensor. Values should be sorted and can be repeated.}]>:$segment_ids
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{Has same shape as data, except for dimension 0 which
has size `k`, the number of segments.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tsegmentids = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
}

def TF_SparseSoftmaxCrossEntropyWithLogitsOp : TF_Op<"SparseSoftmaxCrossEntropyWithLogits", [NoSideEffect]> {
  let summary = [{
Computes softmax cross entropy cost and gradients to backpropagate.
  }];

  let description = [{
Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept
a matrix of label probabilities, but rather a single label per row
of features.  This label is considered to have probability 1.0 for the
given row.

Inputs are the logits, not probabilities.
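
For illustration, a minimal sketch using the Python wrapper
`tf.nn.sparse_softmax_cross_entropy_with_logits` (assuming TF 2.x):

```python
import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0],
                      [0.0, 1.0, 3.0]])  # batch_size x num_classes
labels = tf.constant([0, 2])             # one class index per row

loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=labels, logits=logits)        # per-example loss, shape [2]
```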
  }];

  let arguments = (ins
    Arg<TF_FloatTensor, [{batch_size x num_classes matrix}]>:$features,
    Arg<TF_I32OrI64Tensor, [{batch_size vector with values in [0, num_classes).
This is the label for the given minibatch entry.}]>:$labels
  );

  let results = (outs
    Res<TF_FloatTensor, [{Per example loss (batch_size vector).}]>:$loss,
    Res<TF_FloatTensor, [{backpropagated gradients (batch_size x num_classes matrix).}]>:$backprop
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tlabels = TF_DerivedOperandTypeAttr<1>;

  let verifier = [{ return Verify(*this); }];
}

def TF_SparseToDenseOp : TF_Op<"SparseToDense", [NoSideEffect]> {
  let summary = "Converts a sparse representation into a dense tensor.";

  let description = [{
Builds an array `dense` with shape `output_shape` such that

```
# If sparse_indices is scalar
dense[i] = (i == sparse_indices ? sparse_values : default_value)

# If sparse_indices is a vector, then for each i
dense[sparse_indices[i]] = sparse_values[i]

# If sparse_indices is an n by d matrix, then for each i in [0, n)
dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
```

All other values in `dense` are set to `default_value`.  If `sparse_values` is a
scalar, all sparse indices are set to this single value.

Indices should be sorted in lexicographic order, and indices must not
contain any repeats. If `validate_indices` is true, these properties
are checked during execution.
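
For illustration, a minimal sketch via the raw-op binding (assuming TF 2.x):

```python
import tensorflow as tf

dense = tf.raw_ops.SparseToDense(
    sparse_indices=tf.constant([[0, 0], [1, 2]], dtype=tf.int64),
    output_shape=tf.constant([3, 4], dtype=tf.int64),
    sparse_values=tf.constant([5, 6]),
    default_value=tf.constant(0))
# => [[5, 0, 0, 0],
#     [0, 0, 6, 0],
#     [0, 0, 0, 0]]
```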
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
index where `sparse_values[i]` will be placed.}]>:$sparse_indices,
    Arg<TF_I32OrI64Tensor, [{1-D.  Shape of the dense output tensor.}]>:$output_shape,
    Arg<TF_Tensor, [{1-D.  Values corresponding to each row of `sparse_indices`,
or a scalar value to be used for all sparse indices.}]>:$sparse_values,
    Arg<TF_Tensor, [{Scalar value to set for indices not specified in
`sparse_indices`.}]>:$default_value,

    DefaultValuedAttr<BoolAttr, "true">:$validate_indices
  );

  let results = (outs
    Res<TF_Tensor, [{Dense output tensor of shape `output_shape`.}]>:$dense
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
}

def TF_SplitOp : TF_Op<"Split", [NoSideEffect]> {
  let summary = "Splits a tensor into `num_split` tensors along one dimension.";
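
  let description = [{
For illustration, a minimal sketch using the Python wrapper `tf.split`
(assuming TF 2.x; an integer `num_or_size_splits` lowers to this op):

```python
import tensorflow as tf

value = tf.reshape(tf.range(12), [3, 4])

# Split the second dimension into two tensors of shape [3, 2] each.
a, b = tf.split(value, num_or_size_splits=2, axis=1)
```
  }];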

  let arguments = (ins
    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to split.  Must be in the range
`[-rank(value), rank(value))`.}]>:$split_dim,
    Arg<TF_Tensor, [{The tensor to split.}]>:$value
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{Identically shaped tensors whose shape matches that of `value`
except along `split_dim`, where their sizes are
`value.shape[split_dim] / num_split`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultSizeAttr num_split = TF_DerivedResultSizeAttr<0>;

  let verifier = [{ return Verify(*this); }];
}

def TF_SplitVOp : TF_Op<"SplitV", [NoSideEffect]> {
  let summary = "Splits a tensor into `num_split` tensors along one dimension.";
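
  let description = [{
For illustration, a minimal sketch using the Python wrapper `tf.split`
(assuming TF 2.x; a list `num_or_size_splits` lowers to this op, and one
entry may be -1 to have that size inferred):

```python
import tensorflow as tf

value = tf.reshape(tf.range(12), [3, 4])

# Sizes [1, -1] along axis 1: the -1 is inferred as 3.
a, b = tf.split(value, num_or_size_splits=[1, -1], axis=1)
# a.shape => [3, 1], b.shape => [3, 3]
```
  }];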

  let arguments = (ins
    Arg<TF_Tensor, [{The tensor to split.}]>:$value,
    Arg<TF_I32OrI64Tensor, [{list containing the sizes of each output tensor along the split
dimension. Must sum to the size of `value` along `split_dim`.
Can contain one -1 indicating that dimension is to be inferred.}]>:$size_splits,
    Arg<TF_Int32Tensor, [{0-D.  The dimension along which to split.  Must be in the range
`[-rank(value), rank(value))`.}]>:$split_dim
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{Tensors whose shape matches that of `value`
except along `split_dim`, where their sizes are
`size_splits[i]`.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tlen = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultSizeAttr num_split = TF_DerivedResultSizeAttr<0>;

  let verifier = [{ return Verify(*this); }];
}

def TF_SqrtOp : TF_Op<"Sqrt", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes square root of x element-wise.";

  let description = [{
I.e., \\(y = \sqrt{x} = x^{1/2}\\).
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SqrtGradOp : TF_Op<"SqrtGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the gradient for the sqrt of `x` wrt its input.";

  let description = [{
Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
is the corresponding input gradient.
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$y,
    TF_FpOrComplexTensor:$dy
  );

  let results = (outs
    TF_FpOrComplexTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SquareOp : TF_Op<"Square", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes square of x element-wise.";

  let description = [{
I.e., \\(y = x * x = x^2\\).
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_SquaredDifferenceOp : TF_Op<"SquaredDifference", [Commutative, NoSideEffect, ResultsBroadcastableShape]>,
                             WithBroadcastableBinOpBuilder {
  let summary = "Returns conj(x - y)(x - y) element-wise.";

  let description = [{
*NOTE*: `SquaredDifference` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
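
For illustration, a minimal sketch using the Python wrapper
`tf.math.squared_difference` (assuming TF 2.x; `y` broadcasts against `x`):

```python
import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = tf.constant([2.0, 2.0])  # broadcast across the rows of x

tf.math.squared_difference(x, y)
# => [[1.0, 0.0], [1.0, 4.0]]
```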
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$y
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SqueezeOp : TF_Op<"Squeeze", [NoSideEffect]> {
  let summary = "Removes dimensions of size 1 from the shape of a tensor.";

  let description = [{
Given a tensor `input`, this operation returns a tensor of the same type with
all dimensions of size 1 removed. If you don't want to remove all size 1
dimensions, you can remove specific size 1 dimensions by specifying
`axis`.

For example:

```
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t)) ==> [2, 3]
```

Or, to remove specific size 1 dimensions:

```
# 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1]
```
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{The `input` to squeeze.}]>:$input,

    DefaultValuedAttr<I64ArrayAttr, "{}">:$squeeze_dims
  );

  let results = (outs
    Res<TF_Tensor, [{Contains the same data as `input`, but has one or more dimensions of
size 1 removed.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_StackCloseV2Op : TF_Op<"StackCloseV2", []> {
  let summary = "Delete the stack from its resource container.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackFree]>:$handle
  );

  let results = (outs);
}

def TF_StackPopV2Op : TF_Op<"StackPopV2", []> {
  let summary = "Pop the element at the top of the stack.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackRead, TF_StackWrite]>:$handle
  );

  let results = (outs
    Res<TF_Tensor, [{The tensor that is popped from the top of the stack.}]>:$elem
  );

  TF_DerivedResultTypeAttr elem_type = TF_DerivedResultTypeAttr<0>;
}

def TF_StackPushV2Op : TF_Op<"StackPushV2", []> {
  let summary = "Push an element onto the stack.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a stack.}], [TF_StackRead, TF_StackWrite]>:$handle,
    Arg<TF_Tensor, [{The tensor to be pushed onto the stack.}]>:$elem,

    DefaultValuedAttr<BoolAttr, "false">:$swap_memory
  );

  let results = (outs
    Res<TF_Tensor, [{The same tensor as the input 'elem'.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_StackV2Op : TF_Op<"StackV2", []> {
  let summary = "A stack that produces elements in first-in last-out order.";

  let arguments = (ins
    Arg<TF_Int32Tensor, [{The maximum size of the stack if non-negative. If negative, the stack
size is unlimited.}]>:$max_size,

    TypeAttr:$elem_type,
    StrAttr:$stack_name
  );

  let results = (outs
    Res<TF_ResourceTensor, [{The handle to the stack.}], [TF_StackAlloc]>:$handle
  );
}

def TF_StatelessMultinomialOp : TF_Op<"StatelessMultinomial", [NoSideEffect, TF_NoConstantFold]> {
  let summary = "Draws samples from a multinomial distribution.";
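
  let description = [{
For illustration, a minimal sketch using the Python wrapper
`tf.random.stateless_categorical`, which lowers to this op (assuming TF 2.x):

```python
import tensorflow as tf

logits = tf.math.log([[0.5, 0.5]])  # 1 x num_classes; unnormalized is fine

# Same seed => same samples on every call.
samples = tf.random.stateless_categorical(
    logits, num_samples=5, seed=[7, 17])  # shape [1, 5]
```
  }];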

  let arguments = (ins
    Arg<TF_IntOrFpTensor, [{2-D Tensor with shape `[batch_size, num_classes]`.  Each slice `[i, :]`
represents the unnormalized log probabilities for all classes.}]>:$logits,
    Arg<TF_Int32Tensor, [{0-D.  Number of independent samples to draw for each row slice.}]>:$num_samples,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_I32OrI64Tensor, [{2-D Tensor with shape `[batch_size, num_samples]`.  Each slice `[i, :]`
contains the drawn class labels with range `[0, num_classes)`.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr output_dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessParameterizedTruncatedNormalOp : TF_Op<"StatelessParameterizedTruncatedNormal", [NoSideEffect, TF_NoConstantFold]> {
  let summary = "";

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The mean parameter of each batch.}]>:$means,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The standard deviation parameter of each batch. Must be greater than 0.}]>:$stddevs,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The minimum cutoff. May be -infinity.}]>:$minvals,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The maximum cutoff. May be +infinity, and must be greater than `minvals`
for each batch.}]>:$maxvals
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The outputs are truncated normal samples and are a deterministic function of
`shape`, `seed`, `minvals`, `maxvals`, `means` and `stddevs`.}]>:$output
  );

  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
}

def TF_StatelessRandomBinomialOp : TF_Op<"StatelessRandomBinomial", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom numbers from a binomial distribution.
  }];

  let description = [{
Outputs random values from a binomial distribution.

The outputs are a deterministic function of `shape`, `seed`, `counts`, and `probs`.
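
For illustration, a minimal sketch using the Python wrapper
`tf.random.stateless_binomial` (assuming TF 2.x):

```python
import tensorflow as tf

# Same seed, counts and probs => identical samples on every call.
samples = tf.random.stateless_binomial(
    shape=[2, 3], seed=[12, 34], counts=10.0, probs=0.5)
```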
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The counts of the binomial distribution. Must be broadcastable with `probs`,
and broadcastable with the rightmost dimensions of `shape`.}]>:$counts,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The probability of success for the binomial distribution. Must be broadcastable
with `counts` and broadcastable with the rightmost dimensions of `shape`.}]>:$probs
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr S = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomGammaV2Op : TF_Op<"StatelessRandomGammaV2", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom numbers from a gamma distribution.
  }];

  let description = [{
Outputs random values from a gamma distribution.

The outputs are a deterministic function of `shape`, `seed`, and `alpha`.
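
For illustration, a minimal sketch using the Python wrapper
`tf.random.stateless_gamma` (assuming TF 2.x):

```python
import tensorflow as tf

# Three samples each for concentrations 0.5 and 1.5; the rightmost
# dimension of `shape` matches the shape of `alpha`.
samples = tf.random.stateless_gamma(
    shape=[3, 2], seed=[12, 34], alpha=[0.5, 1.5])
```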
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{The concentration of the gamma distribution. Shape must match the rightmost
dimensions of `shape`.}]>:$alpha
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
}

def TF_StatelessRandomGetAlgOp : TF_Op<"StatelessRandomGetAlg", []> {
  let summary = "Picks the best counter-based RNG algorithm based on device.";

  let description = [{
This op picks the best counter-based RNG algorithm based on device.
  }];

  let arguments = (ins);

  let results = (outs
    Res<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );
}

def TF_StatelessRandomGetKeyCounterOp : TF_Op<"StatelessRandomGetKeyCounter", []> {
  let summary = [{
Scrambles seed into key and counter, using the best algorithm based on device.
  }];

  let description = [{
This op scrambles a shape-[2] seed into a key and a counter, both needed by counter-based RNG algorithms. The scrambling uses the best algorithm based on device. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Res<TF_Uint64Tensor, [{Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).}]>:$counter
  );

  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<0>;
}

def TF_StatelessRandomGetKeyCounterAlgOp : TF_Op<"StatelessRandomGetKeyCounterAlg", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Picks the best algorithm based on device, and scrambles seed into key and counter.
  }];

  let description = [{
This op picks the best counter-based RNG algorithm based on device, and scrambles a shape-[2] seed into a key and a counter, both needed by the counter-based algorithm. The scrambling is opaque but approximately satisfies the property that different seeds result in different key/counter pairs (which will in turn result in different random numbers).
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Res<TF_Uint64Tensor, [{Counter for the counter-based RNG algorithm. Since counter size is algorithm-dependent, this output will be right-padded with zeros to reach shape uint64[2] (the current maximal counter size among algorithms).}]>:$counter,
    Res<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );

  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<0>;
}

def TF_StatelessRandomNormalOp : TF_Op<"StatelessRandomNormal", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom values from a normal distribution.
  }];

  let description = [{
The generated values will have mean 0 and standard deviation 1.

The outputs are a deterministic function of `shape` and `seed`.
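
For illustration, a minimal sketch using the Python wrapper
`tf.random.stateless_normal` (assuming TF 2.x):

```python
import tensorflow as tf

a = tf.random.stateless_normal(shape=[2, 3], seed=[1, 2])
b = tf.random.stateless_normal(shape=[2, 3], seed=[1, 2])
# a and b are identical because the op is a pure function of its inputs.
```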
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomNormalV2Op : TF_Op<"StatelessRandomNormalV2", [NoSideEffect]> {
  let summary = [{
Outputs deterministic pseudorandom values from a normal distribution.
  }];

  let description = [{
The generated values will have mean 0 and standard deviation 1.

The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomPoissonOp : TF_Op<"StatelessRandomPoisson", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom numbers from a Poisson distribution.
  }];

  let description = [{
Outputs random values from a Poisson distribution.

The outputs are a deterministic function of `shape`, `seed`, and `lam`.
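
For illustration, a minimal sketch using the Python wrapper
`tf.random.stateless_poisson` (assuming TF 2.x):

```python
import tensorflow as tf

# Rates 5 and 20; the rightmost dimension of `shape` matches `lam`.
samples = tf.random.stateless_poisson(
    shape=[4, 2], seed=[10, 20], lam=[5.0, 20.0])
```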
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
    Arg<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{The rate of the Poisson distribution. Shape must match the rightmost dimensions
of `shape`.}]>:$lam
  );

  let results = (outs
    Res<TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Int32, TF_Int64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
  TF_DerivedOperandTypeAttr Rtype = TF_DerivedOperandTypeAttr<2>;
}

def TF_StatelessRandomUniformOp : TF_Op<"StatelessRandomUniform", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom values from a uniform distribution.
  }];

  let description = [{
The generated values follow a uniform distribution in the range `[0, 1)`. The
lower bound 0 is included in the range, while the upper bound 1 is excluded.

The outputs are a deterministic function of `shape` and `seed`.
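
For illustration, a minimal sketch using the Python wrapper
`tf.random.stateless_uniform` (assuming TF 2.x):

```python
import tensorflow as tf

u = tf.random.stateless_uniform(shape=[2, 2], seed=[3, 4])
# Values lie in [0, 1); the same seed always reproduces them.
```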
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomUniformFullIntOp : TF_Op<"StatelessRandomUniformFullInt", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
  }];

  let description = [{
The generated values are uniform integers covering the whole range of `dtype`.

The outputs are a deterministic function of `shape` and `seed`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomUniformFullIntV2Op : TF_Op<"StatelessRandomUniformFullIntV2", [NoSideEffect]> {
  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
  }];

  let description = [{
The generated values are uniform integers covering the whole range of `dtype`.

The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );

  let results = (outs
    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessRandomUniformIntOp : TF_Op<"StatelessRandomUniformInt", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
  }];

  let description = [{
The generated values follow a uniform distribution in the range `[minval, maxval)`.

The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.
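
For illustration, a minimal sketch using the Python wrapper
`tf.random.stateless_uniform` with integer bounds (assuming TF 2.x):

```python
import tensorflow as tf

d = tf.random.stateless_uniform(
    shape=[5], seed=[11, 12], minval=1, maxval=7, dtype=tf.int32)
# Deterministic "dice rolls" in [1, 7).
```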
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed,
    Arg<TF_I32OrI64Tensor, [{Minimum value (inclusive, scalar).}]>:$minval,
    Arg<TF_I32OrI64Tensor, [{Maximum value (exclusive, scalar).}]>:$maxval
  );

  let results = (outs
    Res<TF_I32OrI64Tensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<2>;
}

def TF_StatelessRandomUniformIntV2Op : TF_Op<"StatelessRandomUniformIntV2", [NoSideEffect]> {
  let summary = [{
Outputs deterministic pseudorandom integers from a uniform distribution.
  }];

  let description = [{
The generated values follow a uniform distribution in the range `[minval, maxval)`.

The outputs are a deterministic function of `shape`, `key`, `counter`, `alg`, `minval` and `maxval`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg,
    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Minimum value (inclusive, scalar).}]>:$minval,
    Arg<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Maximum value (exclusive, scalar).}]>:$maxval
  );

  let results = (outs
    Res<TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<4>;
}

def TF_StatelessRandomUniformV2Op : TF_Op<"StatelessRandomUniformV2", [NoSideEffect]> {
  let summary = [{
Outputs deterministic pseudorandom values from a uniform distribution.
  }];

  let description = [{
The generated values follow a uniform distribution in the range `[0, 1)`. The
lower bound 0 is included in the range, while the upper bound 1 is excluded.

The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessTruncatedNormalOp : TF_Op<"StatelessTruncatedNormal", [NoSideEffect, TF_NoConstantFold]> {
  let summary = [{
Outputs deterministic pseudorandom values from a truncated normal distribution.
  }];

  let description = [{
The generated values follow a normal distribution with mean 0 and standard
deviation 1, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.

The outputs are a deterministic function of `shape` and `seed`.
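
For illustration, a minimal sketch using the Python wrapper
`tf.random.stateless_truncated_normal` (assuming TF 2.x):

```python
import tensorflow as tf

t = tf.random.stateless_truncated_normal(shape=[3, 3], seed=[5, 6])
# All values fall within 2 standard deviations of the mean.
tf.reduce_max(tf.abs(t))  # <= 2.0
```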
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_I32OrI64Tensor, [{2 seeds (shape [2]).}]>:$seed
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tseed = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StatelessTruncatedNormalV2Op : TF_Op<"StatelessTruncatedNormalV2", [NoSideEffect]> {
  let summary = [{
Outputs deterministic pseudorandom values from a truncated normal distribution.
  }];

  let description = [{
The generated values follow a normal distribution with mean 0 and standard
deviation 1, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.

The outputs are a deterministic function of `shape`, `key`, `counter` and `alg`.
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,
    Arg<TF_Uint64Tensor, [{Key for the counter-based RNG algorithm (shape uint64[1]).}]>:$key,
    Arg<TF_Uint64Tensor, [{Initial counter for the counter-based RNG algorithm (shape uint64[2] or uint64[1] depending on the algorithm). If a larger vector is given, only the needed portion on the left (i.e. [:N]) will be used.}]>:$counter,
    Arg<TF_Int32Tensor, [{The RNG algorithm (shape int32[]).}]>:$alg
  );

  let results = (outs
    Res<TF_FloatTensor, [{Random values with specified shape.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tshape = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_StopGradientOp : TF_Op<"StopGradient", [NoSideEffect, TF_AllTypesMatch<["input", "output"]>]> {
  let summary = "Stops gradient computation.";

  let description = [{
When executed in a graph, this op outputs its input tensor as-is.

When building ops to compute gradients, this op prevents the contribution of
its inputs from being taken into account.  Normally, the gradient generator adds ops
to a graph to compute the derivatives of a specified 'loss' by recursively
finding out inputs that contributed to its computation.  If you insert this op
in the graph, its inputs are masked from the gradient generator.  They are not
taken into account for computing gradients.

This is useful any time you want to compute a value with TensorFlow but need
to pretend that the value was a constant. For example, the softmax function
for a vector x can be written as

```python

  def softmax(x):
    numerator = tf.exp(x)
    denominator = tf.reduce_sum(numerator)
    return numerator / denominator
```

This, however, is susceptible to overflow if the values in x are large. An
alternative, more stable way is to subtract the maximum of x from each of the
values.

```python

  def stable_softmax(x):
    z = x - tf.reduce_max(x)
    numerator = tf.exp(z)
    denominator = tf.reduce_sum(numerator)
    return numerator / denominator
```

However, when we backprop through the softmax to x, we don't want to backprop
through the `tf.reduce_max(x)` calculation (if the max values are not unique
then the gradient could flow to the wrong input); we want to treat it as a
constant. Therefore, we should write this out as

```python

  def stable_softmax(x):
    z = x - tf.stop_gradient(tf.reduce_max(x))
    numerator = tf.exp(z)
    denominator = tf.reduce_sum(numerator)
    return numerator / denominator
```

Some other examples include:

*  The *EM* algorithm where the *M-step* should not involve backpropagation
   through the output of the *E-step*.
*  Contrastive divergence training of Boltzmann machines where, when
   differentiating the energy function, the training must not backpropagate
   through the graph that generated the samples from the model.
*  Adversarial training, where no backprop should happen through the adversarial
   example generation process.
  }];

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_StridedSliceOp : TF_Op<"StridedSlice", [NoSideEffect]> {
  let summary = "Return a strided slice from `input`.";

  let description = [{
Note, most python users will want to use the Python `Tensor.__getitem__`
or `Variable.__getitem__` rather than this op directly.

The goal of this op is to produce a new tensor with a subset of
the elements from the `n` dimensional `input` tensor. The subset is chosen using
a sequence of `m` sparse range specifications encoded into the arguments
of this function. Note, in some cases
`m` could be equal to `n`, but this need not be the case. Each
range specification entry can be one of the following:

- An ellipsis (...). Ellipses are used to imply zero or more
  dimensions of full-dimension selection and are produced using
  `ellipsis_mask`. For example, `foo[...]` is the identity slice.

- A new axis. This is used to insert a new shape=1 dimension and is
  produced using `new_axis_mask`. For example, `foo[tf.newaxis, ...]` where
  `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor.


- A range `begin:end:stride`. This is used to specify how much to choose from
  a given dimension. `stride` can be any integer but 0.  `begin` is an integer
  which represents the index of the first value to select while `end` represents
  the index of the last value to select. The number of values selected in each
  dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`.
  `begin` and `end` can be negative where `-1` is the last element, `-2` is
  the second to last. `begin_mask` controls whether to replace the explicitly
  given `begin` with an implicit effective value of `0` if `stride > 0` and
  `-1` if `stride < 0`. `end_mask` is analogous but produces the number
  required to create the largest open interval. For example, given a shape
  `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do
  not assume this is equivalent to `foo[0:-1]` which has an effective `begin`
  and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the
  first dimension of a tensor while dropping the last two elements (in the
  original order). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`.

- A single index. This is used to keep only elements that have a given
  index. For example, `foo[2, :]` on a shape `(5,6)` tensor produces a
  shape `(6,)` tensor. This is encoded in `begin` and `end` and
  `shrink_axis_mask`.

Each conceptual range specification is encoded in the op's arguments. This
encoding is best understood by considering a non-trivial example. In
particular,
`foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as

```
begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
end = [2, 4, x, x, -3, x]
strides = [1, 1, x, x, -1, 1]
begin_mask = 1<<4 | 1<<5 = 48
end_mask = 1<<5 = 32
ellipsis_mask = 1<<3 = 8
new_axis_mask = 1<<2 = 4
shrink_axis_mask = 1<<0 = 1
```

In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of
the slice becomes (2, 1, 5, 5, 2, 5).
Let us walk step by step through each argument specification.

1.  The first argument in the example slice is turned into `begin = 1` and
`end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we
also set the appropriate bit in `shrink_axis_mask`.

2. `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have
zero bits contributed.

3. None is a synonym for `tf.newaxis`. This means insert a size-1
dimension in the final shape. Dummy values are contributed to begin,
end and stride, while the new_axis_mask bit is set.

4. `...` grabs the full ranges from as many dimensions as needed to
fully specify a slice for every dimension of the input shape.

5. `:-3:-1` shows the use of negative indices. A negative index `i` associated
with a dimension that has shape `s` is converted to a positive index
`s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion
is done internally so begin, end and strides receive x, -3, and -1.
The appropriate begin_mask bit is set to indicate the start range is the
full range (ignoring the x).

6. `:` indicates that the entire contents of the corresponding dimension
is selected. This is equivalent to `::` or `0::1`. begin, end, and strides
receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and
`end_mask` are also set.

*Requirements*:
  `0 != strides[i] for i in [0, m)`
  `ellipsis_mask must be a power of two (only one ellipsis)`
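
For illustration, a minimal sketch (assuming TF 2.x; Python `__getitem__`
lowers to this op with the masks described above):

```python
import tensorflow as tf

foo = tf.reshape(tf.range(9), [3, 3])

foo[1, :]           # shrink_axis_mask keeps only row 1 => shape (3,)
foo[::-1]           # negative stride reverses the first dimension
foo[:, tf.newaxis]  # new_axis_mask inserts a size-1 dimension => (3, 1, 3)
```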
  }];

  let arguments = (ins
    TF_Tensor:$input,
    Arg<TF_I32OrI64Tensor, [{`begin[k]` specifies the offset into the `k`th range specification.
The exact dimension this corresponds to will be determined by context.
Out-of-bounds values will be silently clamped. If the `k`th bit of
`begin_mask` is set, then `begin[k]` is ignored and the full range of the
appropriate dimension is used instead. Negative values cause indexing
to start from the highest element, e.g. if `foo==[1,2,3]` then `foo[-1]==3`.}]>:$begin,
    Arg<TF_I32OrI64Tensor, [{`end[i]` is like `begin` with the exception that `end_mask` is
used to determine full ranges.}]>:$end,
    Arg<TF_I32OrI64Tensor, [{`strides[i]` specifies the increment in the `i`th specification
after extracting a given element. Negative indices will reverse
the original order. Out-of-range values are
clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0`}]>:$strides,

    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
    DefaultValuedAttr<I64Attr, "0">:$end_mask,
    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;

  let hasFolder = 1;

  let verifier = [{ return VerifyStridedSliceBase(*this); }];

  let extraClassDeclaration = [{
    // If sliced shape is able to be deduced, returns true, updates
    // `begin_indices`, `end_indices`, and `strides` with their canonical
    // values, respectively.
    bool GetSlicedBoundRanges(
      ::llvm::SmallVectorImpl<int64_t> *slice_begin,
      ::llvm::SmallVectorImpl<int64_t> *slice_end,
      ::llvm::SmallVectorImpl<int64_t> *slice_stride);
  }];
}

def TF_StridedSliceGradOp : TF_Op<"StridedSliceGrad", [NoSideEffect]> {
  let summary = "Returns the gradient of `StridedSlice`.";

  let description = [{
Since `StridedSlice` cuts out pieces of its `input` which is size
`shape`, its gradient will have the same shape (which is passed here
as `shape`). The gradient will be zero in any element that the slice
does not select.

Arguments are the same as `StridedSlice` with the exception that
`dy` is the input gradient to be propagated and `shape` is the
shape of `StridedSlice`'s `input`.
  }];

  let arguments = (ins
    TF_I32OrI64Tensor:$shape,
    TF_I32OrI64Tensor:$begin,
    TF_I32OrI64Tensor:$end,
    TF_I32OrI64Tensor:$strides,
    TF_Tensor:$dy,

    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
    DefaultValuedAttr<I64Attr, "0">:$end_mask,
    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<4>;
  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{ return Verify(*this); }];

  let extraClassDeclaration = [{
    // If sliced shape is able to be deduced, returns true, updates `shape`
    // with the final shape after performing StridedSlice, and updates
    // `begin_indices`, `end_indices`, and `strides` with their canonical
    // values, respectively.
    bool GetSlicedShapeAndBoundRanges(
      ::llvm::SmallVectorImpl<int64_t> *input_shape,
      ::llvm::SmallVectorImpl<int64_t> *slice_begin,
      ::llvm::SmallVectorImpl<int64_t> *slice_end,
      ::llvm::SmallVectorImpl<int64_t> *slice_stride);
  }];
}

def TF_StringJoinOp : TF_Op<"StringJoin", [NoSideEffect]> {
  let summary = [{
Joins the strings in the given list of string tensors into one tensor.
  }];

  let description = [{
The strings are joined with the given separator (default is an empty separator).

Examples:

>>> s = ["hello", "world", "tensorflow"]
>>> tf.strings.join(s, " ")
<tf.Tensor: shape=(), dtype=string, numpy=b'hello world tensorflow'>
  }];

  let arguments = (ins
    Arg<Variadic<TF_StrTensor>, [{A list of string tensors.  The tensors must all have the same shape,
or be scalars.  Scalars may be mixed in; these will be broadcast to the shape
of non-scalar inputs.}]>:$inputs,

    StrAttr:$separator
  );

  let results = (outs
    TF_StrTensor:$output
  );

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_StringToHashBucketFastOp : TF_Op<"StringToHashBucketFast", [NoSideEffect]> {
  let summary = [{
Converts each string in the input Tensor to its hash mod a number of buckets.
  }];

  let description = [{
The hash function is deterministic on the content of the string within the
process and will never change. However, it is not suitable for cryptography.
This function may be used when CPU time is scarce and inputs are trusted or
unimportant. There is a risk of adversaries constructing inputs that all hash
to the same bucket. To prevent this problem, use a strong hash function with
`tf.string_to_hash_bucket_strong`.

Examples:

>>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy()
array([0, 2, 2])
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{The strings to assign a hash bucket.}]>:$input,

    Confined<I64Attr, [IntMinValue<1>]>:$num_buckets
  );

  let results = (outs
    Res<TF_Int64Tensor, [{A Tensor of the same shape as the input `string_tensor`.}]>:$output
  );
}

def TF_SubOp : TF_Op<"Sub", [NoSideEffect, ResultsBroadcastableShape, TF_CwiseBinary, TF_SameOperandsAndResultElementTypeResolveRef]>,
               WithBroadcastableBinOpBuilder {
  let summary = "Returns x - y element-wise.";

  let description = [{
*NOTE*: `Subtract` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;

  let hasFolder = 1;
}

def TF_SumOp : TF_Op<"Sum", [NoSideEffect]> {
  let summary = "Computes the sum of elements across dimensions of a tensor.";

  let description = [{
Reduces `input` along the dimensions given in `axis`. Unless
`keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
`axis`. If `keep_dims` is true, the reduced dimensions are
retained with length 1.
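
For illustration, a minimal sketch using the Python wrapper `tf.reduce_sum`
(assuming TF 2.x):

```python
import tensorflow as tf

x = tf.constant([[1, 1, 1],
                 [1, 1, 1]])

tf.reduce_sum(x, axis=0)                 # => [2, 2, 2]
tf.reduce_sum(x, axis=1, keepdims=True)  # => [[3], [3]]
```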
15554  }];
15555
15556  let arguments = (ins
15557    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The tensor to reduce.}]>:$input,
15558    Arg<TF_I32OrI64Tensor, [{The dimensions to reduce. Must be in the range
15559`[-rank(input), rank(input))`.}]>:$reduction_indices,
15560
15561    DefaultValuedAttr<BoolAttr, "false">:$keep_dims
15562  );
15563
15564  let results = (outs
15565    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The reduced tensor.}]>:$output
15566  );
15567
15568  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
15569  TF_DerivedOperandTypeAttr Tidx = TF_DerivedOperandTypeAttr<1>;
15570
15571  let builders = [
15572    OpBuilderDAG<(ins "Value":$input, "Value":$reduction_indices,
15573      "BoolAttr":$keep_dims)>
15574  ];
15575
15576  let hasFolder = 1;
15577}
15578
15579def TF_SvdOp : TF_Op<"Svd", [NoSideEffect]> {
15580  let summary = [{
15581Computes the singular value decompositions of one or more matrices.
15582  }];
15583
15584  let description = [{
15585Computes the SVD of each inner matrix in `input` such that
15586`input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`

```python
# a is a tensor containing a batch of matrices.
# s is a tensor of singular values for each matrix.
# u is the tensor containing the left singular vectors for each matrix.
# v is the tensor containing the right singular vectors for each matrix.
s, u, v = svd(a)
s, _, _ = svd(a, compute_uv=False)
```
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.}]>:$input,

    DefaultValuedAttr<BoolAttr, "true">:$compute_uv,
    DefaultValuedAttr<BoolAttr, "false">:$full_matrices
  );

  let results = (outs
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Singular values. Shape is `[..., P]`.}]>:$s,
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Left singular vectors. If `full_matrices` is `False` then shape is
`[..., M, P]`; if `full_matrices` is `True` then shape is
`[..., M, M]`. Undefined if `compute_uv` is `False`.}]>:$u,
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>, [{Right singular vectors. If `full_matrices` is `False` then shape is
`[..., N, P]`. If `full_matrices` is `True` then shape is `[..., N, N]`.
Undefined if `compute_uv` is `False`.}]>:$v
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_SymbolicGradientOp : TF_Op<"SymbolicGradient", [NoSideEffect]> {
  let summary = [{
Computes the gradient function for function f via backpropagation.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{a list of input tensors of size N + M;}]>:$input,

    SymbolRefAttr:$f
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{a list of output tensors of size N;}]>:$output
  );

  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
}

def TF_TPUCompilationResultOp : TF_Op<"TPUCompilationResult", [NoSideEffect]> {
  let summary = "Returns the result of a TPU compilation.";

  let description = [{
This operation returns the result of a TPU compilation as a serialized
CompilationResultProto, which holds a status and an error message if an error
occurred during compilation.
  }];

  let arguments = (ins);

  let results = (outs
    TF_StrTensor:$output
  );
}

def TF_TPUCompileSucceededAssertOp : TF_Op<"TPUCompileSucceededAssert", []> {
  let summary = "Asserts that compilation succeeded.";

  let description = [{
This op produces no output and closes the device during failure to ensure all
pending device interactions fail.

'compilation_status' is a serialized CompilationResultProto.
  }];

  let arguments = (ins
    TF_StrTensor:$compilation_status
  );

  let results = (outs);
}

def TF_TPUCopyWithLayoutOp : TF_Op<"TPUCopyWithLayout", [NoSideEffect]> {
  let summary = "Op that copies host tensor to device with specified layout.";

  let description = [{
For internal use only.
  }];

  let arguments = (ins
    TF_Tensor:$input,
    TF_Int64Tensor:$layout
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TPUEmbeddingActivationsOp : TF_Op<"TPUEmbeddingActivations", [NoSideEffect]> {
  let summary = "An op enabling differentiation of TPU Embeddings.";

  let description = [{
This op simply returns its first input, which is assumed to have been sliced
from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of
this op, and its first argument being a trainable Variable, enables automatic
differentiation of graphs containing embeddings via the TPU Embedding Python
libraries.
  }];

  let arguments = (ins
    Arg<TF_Float32Tensor, [{A trainable variable, enabling optimizers to find this op.}]>:$embedding_variable,
    Arg<TF_Float32Tensor, [{The embedding activations Tensor to return.}]>:$sliced_activations,

    Confined<I64Attr, [IntMinValue<0>]>:$table_id,
    Confined<I64Attr, [IntMinValue<0>]>:$lookup_id
  );

  let results = (outs
    TF_Float32Tensor:$output
  );
}

def TF_TPUExecuteOp : TF_Op<"TPUExecute", [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>]> {
  let summary = "Op that loads and executes a TPU program on a TPU device.";

  let description = [{
For the internal use of the distributed TPU compiler.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$args,
    TF_StrTensor:$key
  );

  let results = (outs
    Variadic<TF_Tensor>:$results
  );

  TF_DerivedOperandTypeListAttr Targs = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Tresults = TF_DerivedResultTypeListAttr<0>;
}

def TF_TPUExecuteAndUpdateVariablesOp : TF_Op<"TPUExecuteAndUpdateVariables", [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>]> {
  let summary = [{
Op that executes a program with optional in-place variable updates.
  }];

  let description = [{
It (optionally) reads device variables, loads and executes a TPU program on a
TPU device, and then (optionally) in-place updates variables using the program
outputs, as specified in attributes device_var_reads_indices (program input
indices from directly reading variables) and device_var_updates_indices (program
output indices used to update variables, -1 means no-update/read-only). Program
outputs consumed by these variable updates will not appear in the op output.
For the internal use of the distributed TPU compiler.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$args,
    TF_StrTensor:$key,

    I64ArrayAttr:$device_var_reads_indices,
    I64ArrayAttr:$device_var_updates_indices
  );

  let results = (outs
    Variadic<TF_Tensor>:$results
  );

  TF_DerivedOperandTypeListAttr Targs = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Tresults = TF_DerivedResultTypeListAttr<0>;

  let verifier = [{ return Verify(*this); }];
}

def TF_TPUGetLayoutOp : TF_Op<"TPUGetLayoutOp", [NoSideEffect]> {
  let summary = [{
Op that retrieves the layout of an input or output determined by TPUCompile.
  }];

  let description = [{
For internal use only.
  }];

  let arguments = (ins
    TF_StrTensor:$cache_key,

    I64Attr:$index,
    BoolAttr:$is_output
  );

  let results = (outs
    TF_Int64Tensor:$layout
  );
}

def TF_TPUOrdinalSelectorOp : TF_Op<"TPUOrdinalSelector", []> {
  let summary = "A TPU core selector Op.";

  let description = [{
This Op produces a set of TPU cores (for warm-up) or a single TPU core
(for regular inference) to execute the TPU program on. The output is
consumed by TPUPartitionedCall.
  }];

  let arguments = (ins);

  let results = (outs
    Res<TF_Int32Tensor, [{A vector of 1 or more TPU cores.}]>:$device_ordinals
  );
}

def TF_TPUReplicatedInputOp : TF_Op<"TPUReplicatedInput", [NoSideEffect]> {
  let summary = "Connects N inputs to an N-way replicated TPU computation.";

  let description = [{
This operation holds a replicated input to a `tpu.replicate()` computation subgraph.
Each replicated input has the same shape and type as the output.

For example:
```
%a = "tf.opA"()
%b = "tf.opB"()
%replicated_input = "tf.TPUReplicatedInput"(%a, %b)
%computation = "tf.Computation"(%replicated_input)
```
The above computation has a replicated input of two replicas.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$inputs,

    DefaultValuedAttr<BoolAttr, "false">:$is_mirrored_variable,
    DefaultValuedAttr<I64Attr, "-1">:$index,
    DefaultValuedAttr<BoolAttr, "false">:$is_packed
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_TPUReplicatedOutputOp : TF_Op<"TPUReplicatedOutput", [NoSideEffect]> {
  let summary = "Connects N outputs from an N-way replicated TPU computation.";

  let description = [{
This operation holds a replicated output from a `tpu.replicate()` computation subgraph.
Each replicated output has the same shape and type as the input.

For example:
```
%computation = "tf.Computation"()
%replicated_output:2 = "tf.TPUReplicatedOutput"(%computation)
```
The above computation has a replicated output of two replicas.
  }];

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    Variadic<TF_Tensor>:$outputs
  );

  TF_DerivedResultSizeAttr num_replicas = TF_DerivedResultSizeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TPUReshardVariablesOp : TF_Op<"TPUReshardVariables", []> {
  let summary = [{
Op that reshards on-device TPU variables to specified state. Internal use only.
  }];

  let description = [{
The sharding state is represented as the key of the compilation that generated
the sharding/unsharding programs along with the main program. new_format_key
specifies the desired state, and format_state_var is the current state of the
variables.
  }];

  let arguments = (ins
    Arg<Variadic<TF_ResourceTensor>, "", [TF_VariableRead, TF_VariableWrite]>:$vars,
    TF_StrTensor:$new_format_key,
    Arg<TF_ResourceTensor, "", [TF_VariableRead, TF_VariableWrite]>:$format_state_var
  );

  let results = (outs);

  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_TanOp : TF_Op<"Tan", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes tan of x element-wise.";

  let description = [{
Given an input tensor, this function computes tangent of every
  element in the tensor. Input range is `(-inf, inf)` and
  output range is `(-inf, inf)`. If input lies outside the boundary, `nan`
  is returned.

  ```python
  x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")])
  tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan]
  ```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$x
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TanhOp : TF_Op<"Tanh", [NoSideEffect, TF_LayoutAgnostic, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes hyperbolic tangent of `x` element-wise.";

  let description = [{
Given an input tensor, this function computes hyperbolic tangent of every
  element in the tensor. Input range is `[-inf, inf]` and
  output range is `[-1,1]`.

  >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")])
  >>> tf.math.tanh(x)
  <tf.Tensor: shape=(8,), dtype=float32, numpy=
  array([-1.        , -0.99990916, -0.46211717,  0.7615942 ,  0.8336547 ,
          0.9640276 ,  0.9950547 ,  1.        ], dtype=float32)>
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$x
  );

  let results = (outs
    TF_FpOrComplexTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TanhGradOp : TF_Op<"TanhGrad", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = "Computes the gradient for the tanh of `x` wrt its input.";

  let description = [{
Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
is the corresponding input gradient.
  }];

  let arguments = (ins
    TF_FpOrComplexTensor:$y,
    TF_FpOrComplexTensor:$dy
  );

  let results = (outs
    TF_FpOrComplexTensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TensorArrayCloseV3Op : TF_Op<"TensorArrayCloseV3", []> {
  let summary = "Delete the TensorArray from its resource container.";

  let description = [{
This enables the user to close and release the resource in the middle
of a step/run.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray (output of TensorArray or TensorArrayGrad).}], [TF_TensorArrayFree]>:$handle
  );

  let results = (outs);
}

def TF_TensorArrayConcatV3Op : TF_Op<"TensorArrayConcatV3", []> {
  let summary = "Concat the elements from the TensorArray into value `value`.";

  let description = [{
Takes `T` elements of shapes

  ```
  (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
  ```

and concatenates them into a Tensor of shape:

  ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```

All elements must have the same shape (excepting the first dimension).
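
A minimal sketch via the public `tf.TensorArray` wrapper, which is backed by
these V3 ops (values illustrative):

```python
import tensorflow as tf

ta = tf.TensorArray(tf.float32, size=2, infer_shape=False)
ta = ta.write(0, tf.constant([1., 2.]))      # n0 = 2
ta = ta.write(1, tf.constant([3., 4., 5.]))  # n1 = 3
ta.concat()  # ==> [1., 2., 3., 4., 5.], shape (n0 + n1,)
```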
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,

    DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape_except0
  );

  let results = (outs
    Res<TF_Tensor, [{All of the elements in the TensorArray, concatenated along the first
axis.}]>:$value,
    Res<TF_Int64Tensor, [{A vector of the row sizes of the original T elements in the
value output.  In the example above, this would be the values:
`(n0, n1, ..., n(T-1))`.}]>:$lengths
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorArrayGatherV3Op : TF_Op<"TensorArrayGatherV3", []> {
  let summary = [{
Gather specific elements from the TensorArray into output `value`.
  }];

  let description = [{
All elements selected by `indices` must have the same shape.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
    Arg<TF_Int32Tensor, [{The locations in the TensorArray from which to read tensor elements.}]>:$indices,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,

    DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape
  );

  let results = (outs
    Res<TF_Tensor, [{All of the elements in the TensorArray, concatenated along a new
axis (the new dimension 0).}]>:$value
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorArrayGradV3Op : TF_Op<"TensorArrayGradV3", []> {
  let summary = [{
Creates a TensorArray for storing the gradients of values in the given handle.
  }];

  let description = [{
If the given TensorArray gradient already exists, returns a reference to it.

Locks the size of the original TensorArray by disabling its dynamic size flag.

**A note about the input flow_in:**

The handle flow_in forces the execution of the gradient lookup to occur
only after certain other operations have occurred.  For example, when
the forward TensorArray is dynamically sized, writes to this TensorArray
may resize the object.  The gradient TensorArray is statically sized based
on the size of the forward TensorArray when this operation executes.
Furthermore, the size of the forward TensorArray is frozen by this call.
As a result, the flow is used to ensure that the call to generate the gradient
TensorArray only happens after all writes are executed.

In the case of dynamically sized TensorArrays, gradient computation should
only be performed on read operations that have themselves been chained via
flow to occur only after all writes have executed. That way the final size
of the forward TensorArray is known when this operation is called.

**A note about the source attribute:**

TensorArray gradient calls use an accumulator TensorArray object.  If
multiple gradients are calculated and run in the same session, the multiple
gradient nodes may accidentally flow through the same accumulator TensorArray.
This double counts and generally breaks the TensorArray gradient flow.

The solution is to identify which gradient call this particular
TensorArray gradient is being called in.  This is performed by identifying
a unique string (e.g. "gradients", "gradients_1", ...) from the input
gradient Tensor's name.  This string is used as a suffix when creating
the TensorArray gradient object here (the attribute `source`).

The attribute `source` is added as a suffix to the forward TensorArray's
name when performing the creation / lookup, so that each separate gradient
calculation gets its own TensorArray accumulator.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to the forward TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in,

    StrAttr:$source
  );

  let results = (outs
    Res<TF_ResourceTensor, "", [TF_TensorArrayAlloc]>:$grad_handle,
    TF_Float32Tensor:$flow_out
  );
}

def TF_TensorArrayReadV3Op : TF_Op<"TensorArrayReadV3", []> {
  let summary = "Read an element from the TensorArray into output `value`.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead]>:$handle,
    TF_Int32Tensor:$index,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
  );

  let results = (outs
    Res<TF_Tensor, [{The tensor that is read from the TensorArray.}]>:$value
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorArrayScatterV3Op : TF_Op<"TensorArrayScatterV3", []> {
  let summary = [{
Scatter the data from the input value into specific TensorArray elements.
  }];

  let description = [{
`indices` must be a vector, and its length must match the first dim of `value`.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
    Arg<TF_Int32Tensor, [{The locations at which to write the tensor elements.}]>:$indices,
    Arg<TF_Tensor, [{The concatenated tensor to write to the TensorArray.}]>:$value,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
  );

  let results = (outs
    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
}

def TF_TensorArraySizeV3Op : TF_Op<"TensorArraySizeV3", []> {
  let summary = "Get the current size of the TensorArray.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray (output of TensorArray or TensorArrayGrad).}], [TF_TensorArrayRead]>:$handle,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
  );

  let results = (outs
    Res<TF_Int32Tensor, [{The current size of the TensorArray.}]>:$size
  );
}

def TF_TensorArraySplitV3Op : TF_Op<"TensorArraySplitV3", []> {
  let summary = [{
Split the data from the input value into TensorArray elements.
  }];

  let description = [{
Assuming that `lengths` takes on values

  ```(n0, n1, ..., n(T-1))```

and that `value` has shape

  ```(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```,

this splits values into a TensorArray with T tensors.

TensorArray index t will be the subtensor of values with starting position

  ```(n0 + n1 + ... + n(t-1), 0, 0, ...)```

and having size

  ```nt x d0 x d1 x ...```
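
A hedged sketch of the splitting semantics via the public `tf.TensorArray`
wrapper (values illustrative):

```python
import tensorflow as tf

value = tf.constant([1., 2., 3., 4., 5.])      # n0 + n1 = 5 rows
lengths = tf.constant([2, 3], dtype=tf.int64)
ta = tf.TensorArray(tf.float32, size=2, infer_shape=False)
ta = ta.split(value, lengths)
ta.read(0)  # ==> [1., 2.]
ta.read(1)  # ==> [3., 4., 5.]
```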
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
    Arg<TF_Tensor, [{The concatenated tensor to write to the TensorArray.}]>:$value,
    Arg<TF_Int64Tensor, [{The vector of lengths, how to split the rows of value into the
TensorArray.}]>:$lengths,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
  );

  let results = (outs
    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<1>;
}

def TF_TensorArrayV3Op : TF_Op<"TensorArrayV3", []> {
  let summary = "An array of Tensors of given size.";

  let description = [{
Write data via Write and read via Read or Pack.
  }];

  let arguments = (ins
    Arg<TF_Int32Tensor, [{The size of the array.}]>:$size,

    TypeAttr:$dtype,
    DefaultValuedAttr<TF_ShapeAttr, "llvm::None">:$element_shape,
    DefaultValuedAttr<BoolAttr, "false">:$dynamic_size,
    DefaultValuedAttr<BoolAttr, "true">:$clear_after_read,
    DefaultValuedAttr<BoolAttr, "false">:$identical_element_shapes,
    StrAttr:$tensor_array_name
  );

  let results = (outs
    Res<TF_ResourceTensor, [{The handle to the TensorArray.}], [TF_TensorArrayAlloc]>:$handle,
    Res<TF_Float32Tensor, [{A scalar used to control gradient flow.}]>:$flow
  );
}

def TF_TensorArrayWriteV3Op : TF_Op<"TensorArrayWriteV3", []> {
  let summary = "Push an element onto the tensor_array.";

  let arguments = (ins
    Arg<TF_ResourceTensor, [{The handle to a TensorArray.}], [TF_TensorArrayRead, TF_TensorArrayWrite]>:$handle,
    Arg<TF_Int32Tensor, [{The position to write to inside the TensorArray.}]>:$index,
    Arg<TF_Tensor, [{The tensor to write to the TensorArray.}]>:$value,
    Arg<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_in
  );

  let results = (outs
    Res<TF_Float32Tensor, [{A float scalar that enforces proper chaining of operations.}]>:$flow_out
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
}

def TF_TensorListConcatV2Op : TF_Op<"TensorListConcatV2", [NoSideEffect]> {
  let summary = "Concats all tensors in the list along the 0th dimension.";

  let description = [{
Requires that all tensors have the same shape except the first dimension.

input_handle: The input list.
element_shape: The shape of the uninitialized elements in the list. If the first
  dimension is not -1, it is assumed that all list elements have the same
  leading dim.
leading_dims: The list of leading dims of uninitialized list elements. Used if
  the leading dim of input_handle.element_shape or the element_shape input arg
  is not already set.
tensor: The concated result.
lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used for computing the gradient.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_I32OrI64Tensor:$element_shape,
    TF_Int64Tensor:$leading_dims
  );

  let results = (outs
    TF_Tensor:$tensor,
    TF_Int64Tensor:$lengths
  );

  TF_DerivedOperandTypeAttr shape_type = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorListElementShapeOp : TF_Op<"TensorListElementShape", [NoSideEffect]> {
  let summary = "The shape of the elements of the given list, as a tensor.";

  let description = [{
input_handle: the list
  element_shape: the shape of elements of the list
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle
  );

  let results = (outs
    TF_I32OrI64Tensor:$element_shape
  );

  TF_DerivedResultTypeAttr shape_type = TF_DerivedResultTypeAttr<0>;

  let hasFolder = 1;
}

def TF_TensorListFromTensorOp : TF_Op<"TensorListFromTensor", [NoSideEffect]> {
  let summary = [{
Creates a TensorList which, when stacked, has the value of `tensor`.
  }];

  let description = [{
Each tensor in the result list corresponds to one row of the input tensor.

tensor: The input tensor.
output_handle: The list.
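
A small round-trip sketch using the `tf.raw_ops` bindings, which are assumed
here to map one-to-one onto these list ops (values illustrative):

```python
import tensorflow as tf

t = tf.constant([[1, 2], [3, 4]])
handle = tf.raw_ops.TensorListFromTensor(
    tensor=t, element_shape=tf.constant([2], dtype=tf.int32))
tf.raw_ops.TensorListStack(
    input_handle=handle,
    element_shape=tf.constant([2], dtype=tf.int32),
    element_dtype=tf.int32)  # ==> [[1, 2], [3, 4]]
```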
  }];

  let arguments = (ins
    TF_Tensor:$tensor,
    TF_I32OrI64Tensor:$element_shape
  );

  let results = (outs
    TF_VariantTensor:$output_handle
  );

  TF_DerivedOperandTypeAttr shape_type = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<0>;
}

def TF_TensorListGatherOp : TF_Op<"TensorListGather", [NoSideEffect]> {
  let summary = "Creates a Tensor by indexing into the TensorList.";

  let description = [{
Each row in the produced Tensor corresponds to the element in the TensorList
specified by the given index (see `tf.gather`).

input_handle: The input tensor list.
indices: The indices used to index into the list.
values: The tensor.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$indices,
    TF_Int32Tensor:$element_shape
  );

  let results = (outs
    TF_Tensor:$values
  );

  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorListGetItemOp : TF_Op<"TensorListGetItem", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$index,
    TF_Int32Tensor:$element_shape
  );

  let results = (outs
    TF_Tensor:$item
  );

  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_TensorListLengthOp : TF_Op<"TensorListLength", [NoSideEffect]> {
  let summary = "Returns the number of tensors in the input tensor list.";

  let description = [{
input_handle: the input list
length: the number of tensors in the list
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle
  );

  let results = (outs
    TF_Int32Tensor:$length
  );
}

def TF_TensorListPopBackOp : TF_Op<"TensorListPopBack", [NoSideEffect]> {
  let summary = [{
Returns the last element of the input list as well as a list with all but that element.
  }];

  let description = [{
Fails if the list is empty.

input_handle: the input list
tensor: the withdrawn last element of the list
element_dtype: the type of elements in the list
element_shape: the shape of the output tensor
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$element_shape
  );

  let results = (outs
    TF_VariantTensor:$output_handle,
    TF_Tensor:$tensor
  );

  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<1>;
}

def TF_TensorListPushBackOp : TF_Op<"TensorListPushBack", [NoSideEffect]> {
  let summary = [{
Returns a list which has the passed-in `Tensor` as last element and the other elements of the given list in `input_handle`.
  }];

  let description = [{
tensor: The tensor to put on the list.
input_handle: The old list.
output_handle: A list with the elements of the old list followed by tensor.
element_dtype: the type of elements in the list.
element_shape: a shape compatible with that of elements in the list.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Tensor:$tensor
  );

  let results = (outs
    TF_VariantTensor:$output_handle
  );

  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_TensorListResizeOp : TF_Op<"TensorListResize", [NoSideEffect]> {
  let summary = "Resizes the list.";

  let description = [{
input_handle: the input list
size: size of the output list
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$size
  );

  let results = (outs
    TF_VariantTensor:$output_handle
  );
}

def TF_TensorListScatterIntoExistingListOp : TF_Op<"TensorListScatterIntoExistingList", [NoSideEffect]> {
  let summary = "Scatters tensor at indices in an input list.";

  let description = [{
Each member of the TensorList corresponds to one row of the input tensor,
specified by the given index (see `tf.gather`).

input_handle: The list to scatter into.
tensor: The input tensor.
indices: The indices used to index into the list.
output_handle: The TensorList.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Tensor:$tensor,
    TF_Int32Tensor:$indices
  );

  let results = (outs
    TF_VariantTensor:$output_handle
  );

  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<1>;
}

def TF_TensorListSetItemOp : TF_Op<"TensorListSetItem", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$index,
    TF_Tensor:$item
  );

  let results = (outs
    TF_VariantTensor:$output_handle
  );

  TF_DerivedOperandTypeAttr element_dtype = TF_DerivedOperandTypeAttr<2>;
}

def TF_TensorListStackOp : TF_Op<"TensorListStack", [NoSideEffect]> {
  let summary = "Stacks all tensors in the list.";

  let description = [{
Requires that all tensors have the same shape.

input_handle: the input list
tensor: the gathered result
num_elements: optional. If not -1, the number of elements in the list.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_handle,
    TF_Int32Tensor:$element_shape,

    DefaultValuedAttr<I64Attr, "-1">:$num_elements
  );

  let results = (outs
    TF_Tensor:$tensor
  );

  TF_DerivedResultTypeAttr element_dtype = TF_DerivedResultTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];
}

def TF_TensorScatterAddOp : TF_Op<"TensorScatterAdd", [NoSideEffect]> {
  let summary = [{
Adds sparse `updates` to an existing tensor according to `indices`.
  }];

  let description = [{
This operation creates a new tensor by adding sparse `updates` to the passed
in `tensor`.
This operation is very similar to `tf.scatter_nd_add`, except that the updates
are added onto an existing tensor (as opposed to a variable). If the memory
for the existing tensor cannot be re-used, a copy is made and updated.

`indices` is an integer tensor containing indices into a new tensor of shape
`tensor.shape`.  The last dimension of `indices` can be at most the rank of
`tensor.shape`:

    indices.shape[-1] <= tensor.shape.rank

The last dimension of `indices` corresponds to indices into elements
(if `indices.shape[-1] = tensor.shape.rank`) or slices
(if `indices.shape[-1] < tensor.shape.rank`) along dimension
`indices.shape[-1]` of `tensor.shape`.  `updates` is a tensor with shape

    indices.shape[:-1] + tensor.shape[indices.shape[-1]:]

The simplest form of tensor_scatter_add is to add individual elements to a
tensor by index. For example, say we want to add 4 elements in a rank-1
tensor with 8 elements.

In Python, this scatter add operation would look like this:

```python
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    tensor = tf.ones([8], dtype=tf.int32)
    updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
    print(updated)
```

The resulting tensor would look like this:

    [1, 12, 1, 11, 10, 1, 1, 13]

We can also insert entire slices of a higher rank tensor all at once. For
example, we can insert two slices in the first dimension of a rank-3 tensor
with two matrices of new values.

In Python, this scatter add operation would look like this:

```python
    indices = tf.constant([[0], [2]])
    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                            [7, 7, 7, 7], [8, 8, 8, 8]],
                           [[5, 5, 5, 5], [6, 6, 6, 6],
                            [7, 7, 7, 7], [8, 8, 8, 8]]])
    tensor = tf.ones([4, 4, 4],dtype=tf.int32)
    updated = tf.tensor_scatter_nd_add(tensor, indices, updates)
    print(updated)
```

The resulting tensor would look like this:

    [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
     [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]],
     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]

Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, the index is ignored.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor copied from tensor and updates added according to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TensorScatterMaxOp : TF_Op<"TensorScatterMax", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    Arg<TF_Tensor, [{Tensor to update.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor copied from tensor whose values are element-wise maximum between tensor and updates according to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TensorScatterMinOp : TF_Op<"TensorScatterMin", [NoSideEffect]> {
  let summary = "";

  let arguments = (ins
    Arg<TF_Tensor, [{Tensor to update.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor copied from tensor whose values are element-wise minimum between tensor and updates according to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TensorScatterSubOp : TF_Op<"TensorScatterSub", [NoSideEffect]> {
  let summary = [{
Subtracts sparse `updates` from an existing tensor according to `indices`.
  }];

  let description = [{
This operation creates a new tensor by subtracting sparse `updates` from the
passed in `tensor`.
This operation is very similar to `tf.scatter_nd_sub`, except that the updates
are subtracted from an existing tensor (as opposed to a variable). If the memory
for the existing tensor cannot be re-used, a copy is made and updated.

`indices` is an integer tensor containing indices into a new tensor of shape
`shape`.  The last dimension of `indices` can be at most the rank of `shape`:

    indices.shape[-1] <= shape.rank

The last dimension of `indices` corresponds to indices into elements
(if `indices.shape[-1] = shape.rank`) or slices
(if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of
`shape`.  `updates` is a tensor with shape

    indices.shape[:-1] + shape[indices.shape[-1]:]

The simplest form of tensor_scatter_sub is to subtract individual elements
from a tensor by index. For example, say we want to insert 4 scattered elements
in a rank-1 tensor with 8 elements.

In Python, this scatter subtract operation would look like this:

```python
    indices = tf.constant([[4], [3], [1], [7]])
    updates = tf.constant([9, 10, 11, 12])
    tensor = tf.ones([8], dtype=tf.int32)
    updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
    print(updated)
```

The resulting tensor would look like this:

    [1, -10, 1, -9, -8, 1, 1, -11]

We can also insert entire slices of a higher rank tensor all at once. For
example, we can insert two slices in the first dimension of a rank-3 tensor
with two matrices of new values.

In Python, this scatter subtract operation would look like this:

```python
    indices = tf.constant([[0], [2]])
    updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
                            [7, 7, 7, 7], [8, 8, 8, 8]],
                           [[5, 5, 5, 5], [6, 6, 6, 6],
                            [7, 7, 7, 7], [8, 8, 8, 8]]])
    tensor = tf.ones([4, 4, 4],dtype=tf.int32)
    updated = tf.tensor_scatter_nd_sub(tensor, indices, updates)
    print(updated)
```

The resulting tensor would look like this:

    [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]],
     [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]],
     [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]]

Note that on CPU, if an out of bound index is found, an error is returned.
On GPU, if an out of bound index is found, the index is ignored.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor copied from tensor and updates subtracted according to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TensorScatterUpdateOp : TF_Op<"TensorScatterUpdate", [NoSideEffect]> {
  let summary = [{
Scatter `updates` into an existing tensor according to `indices`.
  }];

  let description = [{
This operation creates a new tensor by applying sparse `updates` to the passed
in `tensor`.
This operation is very similar to `tf.scatter_nd`, except that the updates are
scattered onto an existing tensor (as opposed to a zero-tensor). If the memory
for the existing tensor cannot be re-used, a copy is made and updated.

If `indices` contains duplicates, then we pick the last update for the index.

If an out of bound index is found on CPU, an error is returned.

**WARNING**: There are some GPU specific semantics for this operation.
- If an out of bound index is found, the index is ignored.
- The order in which updates are applied is nondeterministic, so the output
will be nondeterministic if `indices` contains duplicates.

`indices` is an integer tensor containing indices into a new tensor of shape
`shape`.

* `indices` must have at least 2 axes: `(num_updates, index_depth)`.
* The last axis of `indices` is how deep to index into `tensor`, so this index
  depth must be at most the rank of `tensor`: `indices.shape[-1] <= tensor.ndim`

If `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements.
If `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input
`tensor`.

Each `update` has a rank of `tensor.rank - indices.shape[-1]`.
The overall shape of `updates` is:

```
indices.shape[:-1] + tensor.shape[indices.shape[-1]:]
```

For usage examples see the python [tf.tensor_scatter_nd_update](
https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function.
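
A short worked example through that wrapper:

```python
import tensorflow as tf

tensor = tf.zeros([8], dtype=tf.int32)
indices = tf.constant([[4], [3], [1], [7]])  # num_updates=4, index_depth=1
updates = tf.constant([9, 10, 11, 12])
tf.tensor_scatter_nd_update(tensor, indices, updates)
# ==> [0, 11, 0, 10, 9, 0, 0, 12]
```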
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{Tensor to copy/update.}]>:$tensor,
    Arg<TF_I32OrI64Tensor, [{Index tensor.}]>:$indices,
    Arg<TF_Tensor, [{Updates to scatter into output.}]>:$updates
  );

  let results = (outs
    Res<TF_Tensor, [{A new tensor with the given shape and updates applied according
to the indices.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{ return Verify(*this); }];

  let builders = [
    OpBuilderDAG<(ins "Value":$tensor, "Value":$indices, "Value":$updates),
    [{build($_builder, $_state, tensor.getType(), tensor, indices, updates);}]>
  ];
}

def TF_TensorStridedSliceUpdateOp : TF_Op<"TensorStridedSliceUpdate", [NoSideEffect]> {
  let summary = "Assign `value` to the sliced l-value reference of `input`.";

  let description = [{
The values of `value` are assigned to the positions in the tensor `input` that
are selected by the slice parameters. The slice parameters `begin` `end`
`strides` etc. work exactly as in `StridedSlice`.

NOTE this op currently does not support broadcasting and so `value`'s shape
must be exactly the shape produced by the slice of `input`.
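
An illustrative sketch via `tf.raw_ops.TensorStridedSliceUpdate`, assumed here
to map directly onto this op (values arbitrary):

```python
import tensorflow as tf

x = tf.zeros([5], dtype=tf.int32)
tf.raw_ops.TensorStridedSliceUpdate(
    input=x,
    begin=tf.constant([1]), end=tf.constant([4]), strides=tf.constant([1]),
    value=tf.constant([7, 8, 9]))
# ==> [0, 7, 8, 9, 0]
```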
  }];

  let arguments = (ins
    TF_Tensor:$input,
    TF_I32OrI64Tensor:$begin,
    TF_I32OrI64Tensor:$end,
    TF_I32OrI64Tensor:$strides,
    TF_Tensor:$value,

    DefaultValuedAttr<I64Attr, "0">:$begin_mask,
    DefaultValuedAttr<I64Attr, "0">:$end_mask,
    DefaultValuedAttr<I64Attr, "0">:$ellipsis_mask,
    DefaultValuedAttr<I64Attr, "0">:$new_axis_mask,
    DefaultValuedAttr<I64Attr, "0">:$shrink_axis_mask
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Index = TF_DerivedOperandTypeAttr<1>;
}

def TF_TileOp : TF_Op<"Tile", [NoSideEffect]> {
  let summary = "Constructs a tensor by tiling a given tensor.";

  let description = [{
This operation creates a new tensor by replicating `input` `multiples` times.
The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements,
and the values of `input` are replicated `multiples[i]` times along the 'i'th
dimension. For example, tiling `[a b c d]` by `[2]` produces
`[a b c d a b c d]`.

>>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32)
>>> b = tf.constant([1,2], tf.int32)
>>> tf.tile(a, b)
<tf.Tensor: shape=(2, 6), dtype=int32, numpy=
array([[1, 2, 3, 1, 2, 3],
       [4, 5, 6, 4, 5, 6]], dtype=int32)>
>>> c = tf.constant([2,1], tf.int32)
>>> tf.tile(a, c)
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[1, 2, 3],
       [4, 5, 6],
       [1, 2, 3],
       [4, 5, 6]], dtype=int32)>
>>> d = tf.constant([2,2], tf.int32)
>>> tf.tile(a, d)
<tf.Tensor: shape=(4, 6), dtype=int32, numpy=
array([[1, 2, 3, 1, 2, 3],
       [4, 5, 6, 4, 5, 6],
       [1, 2, 3, 1, 2, 3],
       [4, 5, 6, 4, 5, 6]], dtype=int32)>
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{1-D or higher.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{1-D. Length must be the same as the number of dimensions in `input`}]>:$multiples
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr Tmultiples = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{ return Verify(*this); }];

  let hasFolder = 1;
}

def TF_TopKUniqueOp : TF_Op<"TopKUnique", [NoSideEffect]> {
  let summary = "Returns the TopK unique values in the array in sorted order.";

  let description = [{
The running time is proportional to the product of K and the input
size. Sorting the whole array is more efficient for sufficiently large
values of K. The median-of-medians algorithm is probably faster, but
difficult to implement efficiently in XLA. If there are fewer than K
unique numbers (not NaNs), the results are padded with negative
infinity. NaNs are never returned. Subnormal numbers are flushed to
zero. If an element appears at multiple indices, the highest index is
returned. If a TopK element never appears in the input due to padding
values, the indices are padded with negative one. If a padding value
appears in the input and padding is needed, the highest index of the
padding value will be returned. The semantics are not the same as
kth_order_statistic.
  }];

  let arguments = (ins
    TF_Float32Tensor:$input,

    I64Attr:$k
  );

  let results = (outs
    TF_Float32Tensor:$topk,
    TF_Int32Tensor:$topk_indices
  );
}

def TF_TopKV2Op : TF_Op<"TopKV2", [NoSideEffect]> {
  let summary = [{
Finds values and indices of the `k` largest elements for the last dimension.
  }];

  let description = [{
If the input is a vector (rank-1), finds the `k` largest entries in the vector
and outputs their values and indices as vectors.  Thus `values[j]` is the
`j`-th largest entry in `input`, and its index is `indices[j]`.

For matrices (resp. higher rank input), computes the top `k` entries in each
row (resp. vector along the last dimension).  Thus,

    values.shape = indices.shape = input.shape[:-1] + [k]

If two elements are equal, the lower-index element appears first.
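
For example, via the public wrapper `tf.math.top_k`:

```python
import tensorflow as tf

values, indices = tf.math.top_k(tf.constant([1., 9., 3., 9.]), k=2)
values   # ==> [9., 9.]
indices  # ==> [1, 3]  (equal values ordered by lower index first)
```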
  }];

  let arguments = (ins
    Arg<TF_IntOrFpTensor, [{1-D or higher with last dimension at least `k`.}]>:$input,
    Arg<TF_Int32Tensor, [{0-D.  Number of top elements to look for along the last dimension (along each
row for matrices).}]>:$k,

    DefaultValuedAttr<BoolAttr, "true">:$sorted
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{The `k` largest elements along each last dimensional slice.}]>:$values,
    Res<TF_Int32Tensor, [{The indices of `values` within the last dimension of `input`.}]>:$indices
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{ return Verify(*this); }];
}

def TF_TopKWithUniqueOp : TF_Op<"TopKWithUnique", [NoSideEffect]> {
  let summary = "Returns the TopK values in the array in sorted order.";

  let description = [{
This is a combination of MakeUnique and TopKUnique. The returned top-K will
have its lower bits replaced by iota, thus it will be close to the original
value but not exactly the same. The running time is proportional to the product
of K and the input size. NaNs are never returned. Subnormal numbers are flushed
to zero.
  }];

  let arguments = (ins
    TF_Float32Tensor:$input,

    I64Attr:$k
  );

  let results = (outs
    TF_Float32Tensor:$topk,
    TF_Int32Tensor:$topk_indices
  );
}

def TF_TransposeOp : TF_Op<"Transpose", [NoSideEffect]> {
  let summary = "Shuffle dimensions of x according to a permutation.";

  let description = [{
The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
  `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
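
For example, via the public wrapper `tf.transpose`:

```python
import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])  # shape (2, 3)
tf.transpose(x, perm=[1, 0])  # ==> [[1, 4], [2, 5], [3, 6]], shape (3, 2)
```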
  }];

  let arguments = (ins
    TF_Tensor:$x,
    TF_I32OrI64Tensor:$perm
  );

  let results = (outs
    TF_Tensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tperm = TF_DerivedOperandTypeAttr<1>;

  let builders = [
    OpBuilderDAG<(ins "Value":$x, "Value":$perm)>
  ];

  let verifier = [{
    return Verify(*this);
  }];

  let hasFolder = 1;
}

def TF_TridiagonalSolveOp : TF_Op<"TridiagonalSolve", [NoSideEffect]> {
  let summary = "Solves tridiagonal systems of equations.";

  let description = [{
Solves tridiagonal systems of equations.
  Supports batch dimensions and multiple right-hand sides per left-hand side.
  On CPU, solution is computed via Gaussian elimination with or without partial
  pivoting, depending on `partial_pivoting` attribute. On GPU, Nvidia's cuSPARSE
  library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
  Partial pivoting is not yet supported by XLA backends.
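
A small sketch using the public wrapper `tf.linalg.tridiagonal_solve` with the
compact `[3, M]` diagonals layout this op expects (system chosen arbitrarily):

```python
import tensorflow as tf

# 3x3 system with main diagonal [2, 2, 2] and off-diagonals of ones.
diagonals = tf.constant([[1., 1., 0.],   # superdiagonal (last entry ignored)
                         [2., 2., 2.],   # main diagonal
                         [0., 1., 1.]])  # subdiagonal (first entry ignored)
rhs = tf.constant([[1.], [4.], [1.]])    # shape [M, K] with K = 1
tf.linalg.tridiagonal_solve(diagonals, rhs)  # ==> [[-1.], [3.], [-1.]]
```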
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the
tridiagonal matrices with three rows being the superdiagonal, diagonals, and
subdiagonals, in order. The last element of the superdiagonal and the first
element of the subdiagonal are ignored.}]>:$diagonals,
    Arg<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., M, K]`, representing K right-hand sides per
left-hand side.}]>:$rhs,

    DefaultValuedAttr<BoolAttr, "true">:$partial_pivoting
  );

  let results = (outs
    Res<TensorOf<[TF_Complex128, TF_Complex64, TF_Float32, TF_Float64]>, [{Tensor of shape `[..., M, K]` containing the solutions}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TruncateDivOp : TF_Op<"TruncateDiv", [NoSideEffect, ResultsBroadcastableShape]>,
                       WithBroadcastableBinOpBuilder {
  let summary = "Returns x / y element-wise for integer types.";

  let description = [{
Truncation designates that negative numbers will round fractional quantities
toward zero. I.e. -7 / 5 = -1. This matches C semantics but is different
from Python semantics. See `FloorDiv` for a division function that matches
Python semantics.

*NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
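
For example, contrasting C-style truncation with Python floor division:

```python
import tensorflow as tf

x = tf.constant([-7, 7])
y = tf.constant([5, 5])
tf.truncatediv(x, y)  # ==> [-1, 1]  (rounds toward zero)
x // y                # ==> [-2, 1]  (FloorDiv, Python semantics)
```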
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$x,
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$y
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_TruncateModOp : TF_Op<"TruncateMod", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                       WithBroadcastableBinOpBuilder {
  let summary = "Returns element-wise remainder of division.";

  let description = [{
This emulates C semantics in that the result is consistent with a truncating
divide. E.g. `truncate(x / y) * y + truncate_mod(x, y) = x`.

*NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
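
For example:

```python
import tensorflow as tf

x = tf.constant([-7, 7])
y = tf.constant([5, 5])
tf.truncatemod(x, y)  # ==> [-2, 2]; truncate(x / y) * y + truncate_mod(x, y) == x
```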
  }];

  let arguments = (ins
    TF_FpOrI32OrI64Tensor:$x,
    TF_FpOrI32OrI64Tensor:$y
  );

  let results = (outs
    TF_FpOrI32OrI64Tensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_TruncatedNormalOp : TF_Op<"TruncatedNormal", [TF_CannotDuplicate]> {
  let summary = "Outputs random values from a truncated normal distribution.";

  let description = [{
The generated values follow a normal distribution with mean 0 and standard
deviation 1, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
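
For example, through the stateful public wrapper (output values vary by seed):

```python
import tensorflow as tf

samples = tf.random.truncated_normal([2, 3], seed=1)
# Every sample lies within 2 standard deviations of the mean 0.
tf.reduce_all(tf.abs(samples) <= 2.0)  # ==> True
```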
  }];

  let arguments = (ins
    Arg<TF_I32OrI64Tensor, [{The shape of the output tensor.}]>:$shape,

    DefaultValuedAttr<I64Attr, "0">:$seed,
    DefaultValuedAttr<I64Attr, "0">:$seed2
  );

  let results = (outs
    Res<TF_FloatTensor, [{A tensor of the specified shape filled with random truncated normal
values.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_UncompressElementOp : TF_Op<"UncompressElement", [NoSideEffect]> {
  let summary = "Uncompresses a compressed dataset element.";

  let arguments = (ins
    TF_VariantTensor:$compressed
  );

  let results = (outs
    Variadic<TF_Tensor>:$components
  );

  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;
  TF_DerivedResultTypeListAttr output_types = TF_DerivedResultTypeListAttr<0>;
}

def TF_UniqueOp : TF_Op<"Unique", [NoSideEffect]> {
  let summary = "Finds unique elements in a 1-D tensor.";

  let description = [{
This operation returns a tensor `y` containing all of the unique elements of `x`
sorted in the same order that they occur in `x`; `x` does not need to be sorted.
This operation also returns a tensor `idx` the same size as `x` that contains
the index of each value of `x` in the unique output `y`. In other words:

`y[idx[i]] = x[i] for i in [0, 1, ..., len(x) - 1]`

Examples:

```
# tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
y, idx = unique(x)
y ==> [1, 2, 4, 7, 8]
idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4]
```

```
# tensor 'x' is [4, 5, 1, 2, 3, 3, 4, 5]
y, idx = unique(x)
y ==> [4, 5, 1, 2, 3]
idx ==> [0, 1, 2, 3, 4, 4, 0, 1]
```
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{1-D.}]>:$x
  );

  let results = (outs
    Res<TF_Tensor, [{1-D.}]>:$y,
    Res<TF_I32OrI64Tensor, [{1-D.}]>:$idx
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr out_idx = TF_DerivedResultTypeAttr<1>;
}

def TF_UnpackOp : TF_Op<"Unpack", [NoSideEffect]> {
  let summary = [{
Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors.
  }];

  let description = [{
Unpacks `num` tensors from `value` by chipping it along the `axis` dimension.
For example, given a tensor of shape `(A, B, C, D)`:

If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]`
  and each tensor in `output` will have shape `(B, C, D)`. (Note that the
  dimension unpacked along is gone, unlike `split`).

If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]`
  and each tensor in `output` will have shape `(A, C, D)`.
Etc.

This is the opposite of `pack`.
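
For example (an illustrative sketch via the `tf.unstack` wrapper):

``` python
x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])         # shape (2, 3)
tf.unstack(x, axis=0)  # ==> [[1, 2, 3], [4, 5, 6]]:   2 tensors of shape (3,)
tf.unstack(x, axis=1)  # ==> [[1, 4], [2, 5], [3, 6]]: 3 tensors of shape (2,)
```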
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{1-D or higher, with `axis` dimension size equal to `num`.}]>:$value,

    DefaultValuedAttr<I64Attr, "0">:$axis
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{The list of tensors unpacked from `value`.}]>:$output
  );

  TF_DerivedResultSizeAttr num = TF_DerivedResultSizeAttr<0>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{ return Verify(*this); }];

  let hasCanonicalizer = 1;
}

def TF_UnsortedSegmentMaxOp : TF_Op<"UnsortedSegmentMax", [NoSideEffect]> {
  let summary = "Computes the maximum along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

This operator is similar to the unsorted segment sum operator found
[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the maximum such that:

\\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such
that `segment_ids[j...] == i`.

If the maximum is empty for a given segment ID `i`, it outputs the smallest
possible value for the specific numeric type,
`output[i] = numeric_limits<T>::lowest()`.

If the given segment ID `i` is negative, then the corresponding value is
dropped, and will not be included in the result.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
</div>

For example:

``` python
c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 4,  3, 3, 4],
#       [5,  6, 7, 8]]
```
  }];

  let arguments = (ins
    TF_IntOrFpTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
    TF_I32OrI64Tensor:$num_segments
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;

  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
}

def TF_UnsortedSegmentMinOp : TF_Op<"UnsortedSegmentMin", [NoSideEffect]> {
  let summary = "Computes the minimum along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

This operator is similar to the unsorted segment sum operator found
[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the minimum such that:

\\(output_i = \min_{j...} data[j...]\\) where min is over tuples `j...` such
that `segment_ids[j...] == i`.

If the minimum is empty for a given segment ID `i`, it outputs the largest
possible value for the specific numeric type,
`output[i] = numeric_limits<T>::max()`.

For example:

``` python
c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 1,  2, 2, 1],
#       [5,  6, 7, 8]]
```

If the given segment ID `i` is negative, then the corresponding value is
dropped, and will not be included in the result.
  }];

  let arguments = (ins
    TF_IntOrFpTensor:$data,
    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
    TF_I32OrI64Tensor:$num_segments
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;

  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
}

def TF_UnsortedSegmentProdOp : TF_Op<"UnsortedSegmentProd", [NoSideEffect]> {
  let summary = "Computes the product along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

This operator is similar to the unsorted segment sum operator found
[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
Instead of computing the sum over segments, it computes the product of all
entries belonging to a segment such that:

\\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
`j...` such that `segment_ids[j...] == i`.

For example:

``` python
c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 4,  6, 6, 4],
#       [5,  6, 7, 8]]
```

If there is no entry for a given segment ID `i`, it outputs 1.

If the given segment ID `i` is negative, then the corresponding value is
dropped, and will not be included in the result.
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
    TF_I32OrI64Tensor:$num_segments
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;

  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
}

def TF_UnsortedSegmentSumOp : TF_Op<"UnsortedSegmentSum", [NoSideEffect]> {
  let summary = "Computes the sum along segments of a tensor.";

  let description = [{
Read
[the section on segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation)
for an explanation of segments.

Computes a tensor such that
\\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
need not be sorted and need not cover all values in the full
range of valid values.

If the sum is empty for a given segment ID `i`, `output[i] = 0`.
If the given segment ID `i` is negative, the value is dropped and will not be
added to the sum of the segment.

`num_segments` should equal the number of distinct segment IDs.

<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
</div>

``` python
c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]])
tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 5,  5, 5, 5],
#       [5,  6, 7, 8]]
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$data,
    Arg<TF_I32OrI64Tensor, [{A tensor whose shape is a prefix of `data.shape`.}]>:$segment_ids,
    TF_I32OrI64Tensor:$num_segments
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Has same shape as data, except for the first `segment_ids.rank`
dimensions, which are replaced with a single dimension which has size
`num_segments`.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr Tnumsegments = TF_DerivedOperandTypeAttr<2>;

  let verifier = [{ return VerifyUnsortedSegmentReduction(*this); }];
}

def TF_UpperBoundOp : TF_Op<"UpperBound", [NoSideEffect]> {
  let summary = [{
Applies upper_bound(sorted_inputs, values) along each row.
  }];

  let description = [{
Each set of rows with the same index in (sorted_inputs, values) is treated
independently.  The resulting row is the equivalent of calling
`np.searchsorted(sorted_inputs, values, side='right')`.

The result is not a global index to the entire
`Tensor`, but rather just the index in the last dimension.

A 2-D example:

```
sorted_sequence = [[0, 3, 9, 9, 10],
                   [1, 2, 3, 4, 5]]
values = [[2, 4, 9],
          [0, 2, 6]]

result = UpperBound(sorted_sequence, values)

result == [[1, 2, 4],
           [0, 2, 5]]
```
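
The same rows can be computed with NumPy, illustrating the equivalence claimed
above (a sketch, not the kernel implementation):

``` python
import numpy as np

sorted_sequence = np.array([[0, 3, 9, 9, 10],
                            [1, 2, 3, 4, 5]])
values = np.array([[2, 4, 9],
                   [0, 2, 6]])
result = np.stack([np.searchsorted(s, v, side='right')
                   for s, v in zip(sorted_sequence, values)])
# result ==> [[1, 2, 4],
#             [0, 2, 5]]
```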
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{2-D Tensor where each row is ordered.}]>:$sorted_inputs,
    Arg<TF_Tensor, [{2-D Tensor with the same number of rows as `sorted_inputs`. Contains
the values that will be searched for in `sorted_inputs`.}]>:$values
  );

  let results = (outs
    Res<TF_I32OrI64Tensor, [{A `Tensor` with the same shape as `values`.  It contains the last scalar index
into the last dimension where values can be inserted without changing the
ordered property.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;
}

def TF_VarIsInitializedOp : TF_Op<"VarIsInitializedOp", []> {
  let summary = [{
Checks whether a resource handle-based variable has been initialized.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, [{the input resource handle.}], [TF_VariableRead]>:$resource
  );

  let results = (outs
    Res<TF_BoolTensor, [{a scalar boolean which is true if the variable has been
initialized.}]>:$is_initialized
  );

  let hasCanonicalizer = 1;
}

def TF_VariableOp : TF_Op<"Variable", []> {
  let summary = "Use VariableV2 instead.";

  let arguments = (ins
    TF_ShapeAttr:$shape,
    StrAttr:$container,
    StrAttr:$shared_name
  );

  let results = (outs
    TF_Tensor:$ref
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_VariableShapeOp : TF_Op<"VariableShape", []> {
  let summary = "Returns the shape of the variable pointed to by `resource`.";

  let description = [{
This operation returns a 1-D integer tensor representing the shape of `input`.

For example:

```
# 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
shape(t) ==> [2, 2, 3]
```
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_VariableRead]>:$input
  );

  let results = (outs
    TF_I32OrI64Tensor:$output
  );

  TF_DerivedResultTypeAttr out_type = TF_DerivedResultTypeAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasFolder = 1;
}

def TF_VariableV2Op : TF_Op<"VariableV2", []> {
  let summary = [{
Holds state in the form of a tensor that persists across steps.
  }];

  let description = [{
Outputs a ref to the tensor state so it may be read or modified.
TODO(zhifengc/mrry): Add a pointer to a more detailed document
about sharing state in TensorFlow.
  }];

  let arguments = (ins
    TF_ShapeAttr:$shape,
    StrAttr:$container,
    StrAttr:$shared_name
  );

  let results = (outs
    Res<TF_Tensor, [{A reference to the variable tensor.}]>:$ref
  );

  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}

def TF_WhereOp : TF_Op<"Where", [NoSideEffect]> {
  let summary = "Returns locations of nonzero / true values in a tensor.";

  let description = [{
This operation returns the coordinates of true elements in `condition`. The
coordinates are returned in a 2-D tensor where the first dimension (rows)
represents the number of true elements, and the second dimension (columns)
represents the coordinates of the true elements. Keep in mind, the shape of
the output tensor can vary depending on how many true values there are in
`condition`. Indices are output in row-major order.

For example:

```
# 'input' tensor is [[True, False]
#                    [True, False]]
# 'input' has two true values, so output has two coordinates.
# 'input' has rank of 2, so coordinates have two indices.
where(input) ==> [[0, 0],
                  [1, 0]]

# 'input' tensor is [[[True, False]
#                     [True, False]]
#                    [[False, True]
#                     [False, True]]
#                    [[False, False]
#                     [False, True]]]
# 'input' has 5 true values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]

# 'input' tensor is [[[1.5,  0.0]
#                     [-0.5, 0.0]]
#                    [[0.0,  0.25]
#                     [0.0,  0.75]]
#                    [[0.0,  0.0]
#                     [0.0,  0.01]]]
# 'input' has 5 nonzero values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]

# 'input' tensor is [[[1.5 + 0.0j, 0.0  + 0.0j]
#                     [0.0 + 0.5j, 0.0  + 0.0j]]
#                    [[0.0 + 0.0j, 0.25 + 1.5j]
#                     [0.0 + 0.0j, 0.75 + 0.0j]]
#                    [[0.0 + 0.0j, 0.0  + 0.0j]
#                     [0.0 + 0.0j, 0.01 + 0.0j]]]
# 'input' has 5 nonzero magnitude values, so output has 5 coordinates.
# 'input' has rank of 3, so coordinates have three indices.
where(input) ==> [[0, 0, 0],
                  [0, 1, 0],
                  [1, 0, 1],
                  [1, 1, 1],
                  [2, 1, 1]]
```
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input
  );

  let results = (outs
    TF_Int64Tensor:$index
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XdivyOp : TF_Op<"Xdivy", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                 WithBroadcastableBinOpBuilder {
  let summary = "Returns 0 if x == 0, and x / y otherwise, elementwise.";
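
  let description = [{
The zero-numerator case is special-cased so that `0 / 0` yields `0` rather
than NaN. For example (an illustrative sketch via the `tf.math.xdivy`
wrapper):

``` python
tf.math.xdivy(tf.constant([0.0, 4.0]), tf.constant([0.0, 2.0]))
# ==> [0.0, 2.0]  # the 0 / 0 slot is defined to be 0
```
  }];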

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$x,
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$y
  );

  let results = (outs
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}

def TF_XlaBroadcastHelperOp : TF_Op<"XlaBroadcastHelper", [DeclareOpInterfaceMethods<InferTypeOpInterface>, NoSideEffect]> {
  let summary = "Helper operator for performing XLA-style broadcasts";

  let description = [{
Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to
whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules
for binary operators.
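
For example (a sketch assuming the auto-generated
`tf.raw_ops.XlaBroadcastHelper` binding; shapes shown in comments):

``` python
lhs = tf.ones([2, 3])
rhs = tf.ones([3])
lhs_out, rhs_out = tf.raw_ops.XlaBroadcastHelper(
    lhs=lhs, rhs=rhs, broadcast_dims=tf.constant([1], tf.int64))
# lhs_out keeps shape (2, 3); rhs_out is reshaped to (1, 3), mapping its only
# dimension to output dimension 1, so both results have rank 2.
```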
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the LHS input tensor}]>:$lhs,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the RHS input tensor}]>:$rhs,
    Arg<TF_I32OrI64Tensor, [{an XLA-style broadcast dimension specification}]>:$broadcast_dims
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the broadcasted LHS tensor}]>:$lhs_output,
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the broadcasted RHS tensor}]>:$rhs_output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let extraClassDeclaration = [{
    // InferTypeOpInterface:
    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
      return ArraysAreCastCompatible(l, r);
    }
  }];
}

def TF_XlaClusterOutputOp : TF_Op<"XlaClusterOutput", [NoSideEffect]> {
  let summary = [{
Operator that connects the output of an XLA computation to other consumer graph nodes.
  }];

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    TF_Tensor:$outputs
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaConvOp : TF_Op<"XlaConv", [NoSideEffect]> {
  let summary = "Wraps the XLA ConvGeneralDilated operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$lhs,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the kernel tensor}]>:$rhs,
    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,
    Arg<TF_I32OrI64Tensor, [{dilation to apply between input elements}]>:$lhs_dilation,
    Arg<TF_I32OrI64Tensor, [{dilation to apply between kernel elements}]>:$rhs_dilation,
    Arg<TF_I32OrI64Tensor, [{number of feature groups for grouped convolution.}]>:$feature_group_count,

    StrAttr:$dimension_numbers,
    StrAttr:$precision_config
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaDotOp : TF_Op<"XlaDot", [NoSideEffect]> {
  let summary = "Wraps the XLA DotGeneral operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral
.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the LHS tensor}]>:$lhs,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the RHS tensor}]>:$rhs,

    StrAttr:$dimension_numbers,
    StrAttr:$precision_config
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaDynamicSliceOp : TF_Op<"XlaDynamicSlice", [NoSideEffect]> {
  let summary = "Wraps the XLA DynamicSlice operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice
.

DynamicSlice extracts a sub-array from the input array at dynamic
start_indices. The size of the slice in each dimension is passed in
size_indices, which specifies a half-open slice interval in each dimension:
[start, start + size). start_indices must be a rank-1 tensor with one
element per dimension of operand.
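
For example (a sketch assuming the auto-generated `tf.raw_ops.XlaDynamicSlice`
binding, run inside an XLA-compiled function):

``` python
@tf.function(jit_compile=True)
def dynamic_slice(x, starts):
  # size_indices must be a compile-time constant under XLA; start_indices may
  # be dynamic.
  return tf.raw_ops.XlaDynamicSlice(input=x, start_indices=starts,
                                    size_indices=tf.constant([2, 2]))

x = tf.constant([[0, 1, 2],
                 [3, 4, 5],
                 [6, 7, 8]])
dynamic_slice(x, tf.constant([1, 0]))  # 2x2 slice starting at row 1, column 0
# ==> [[3, 4],
#      [6, 7]]
```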
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input,
    Arg<TF_I32OrI64Tensor, [{Rank-1 tensor of N integers containing the starting indices of the
slice in each dimension.}]>:$start_indices,
    Arg<TF_I32OrI64Tensor, [{List of N integers containing the slice size for each
dimension. Each value must be strictly greater than zero, and start + size
must be less than or equal to the size of the dimension to avoid
implementation-defined behavior.}]>:$size_indices
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaDynamicUpdateSliceOp : TF_Op<"XlaDynamicUpdateSlice", [NoSideEffect]> {
  let summary = "Wraps the XLA DynamicUpdateSlice operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice
.

XlaDynamicUpdateSlice generates a result which is the value of the `input`
operand, with a slice update overwritten at `indices`. The shape of `update`
determines the shape of the sub-array of the result which is updated. `indices`
must be a rank-1 tensor with one element per dimension of `input`.

Handling of out-of-bounds slice indices is implementation-defined.
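
For example (a sketch assuming the auto-generated
`tf.raw_ops.XlaDynamicUpdateSlice` binding, run inside an XLA-compiled
function):

``` python
@tf.function(jit_compile=True)
def dynamic_update_slice(x, update, indices):
  return tf.raw_ops.XlaDynamicUpdateSlice(input=x, update=update,
                                          indices=indices)

x = tf.zeros([3, 3], tf.int32)
update = tf.ones([2, 2], tf.int32)
dynamic_update_slice(x, update, tf.constant([1, 1]))
# ==> [[0, 0, 0],
#      [0, 1, 1],
#      [0, 1, 1]]
```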
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input,
    Arg<TF_Tensor, [{A `Tensor` of type T. Same rank as `input`.}]>:$update,
    Arg<TF_I32OrI64Tensor, [{A vector of indices into `input`. Must have length equal to the rank of
`input`.}]>:$indices
  );

  let results = (outs
    Res<TF_Tensor, [{A `Tensor` of type T.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaEinsumOp : TF_Op<"XlaEinsum", [NoSideEffect]> {
  let summary = [{
An op which supports a basic einsum operation with 2 inputs and 1 output.
  }];

  let description = [{
This op has better TPU performance since it doesn't have the explicit reshape
and transpose operations that tf.einsum does.
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Complex64, TF_Float32]>:$a,
    TensorOf<[TF_Bfloat16, TF_Complex64, TF_Float32]>:$b,

    StrAttr:$equation
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex64, TF_Float32]>:$product
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaGatherOp : TF_Op<"XlaGather", [NoSideEffect]> {
  let summary = "Wraps the XLA Gather operator documented at";

  let description = [{
https://www.tensorflow.org/xla/operation_semantics#gather
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The array we're gathering from.}]>:$operand,
    Arg<TF_I32OrI64Tensor, [{Array containing the starting indices of the slices we gather.}]>:$start_indices,
    Arg<TF_I32OrI64Tensor, [{slice_sizes[i] is the bounds for the slice on dimension i.}]>:$slice_sizes,

    StrAttr:$dimension_numbers,
    BoolAttr:$indices_are_sorted
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaHostComputeOp : TF_Op<"XlaHostCompute", []> {
  let summary = [{
A pseudo-op to represent host-side computation in an XLA program.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the host.}]>:$inputs,

    StrArrayAttr:$ancestors,
    TF_ShapeAttrArray:$shapes,
    SymbolRefAttr:$shape_inference_graph,
    StrAttr:$key,
    DefaultValuedAttr<I64Attr, "1000000">:$cost_estimate_ns,
    DefaultValuedAttr<I64Attr, "0">:$tpu_core
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{A list of tensors that will be returned to the device.}]>:$outputs
  );

  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
}

def TF_XlaKeyValueSortOp : TF_Op<"XlaKeyValueSort", [NoSideEffect]> {
  let summary = "Wraps the XLA Sort operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#sort
.

Sorts a tensor. Currently only sorts in ascending order are supported.
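
For example (a sketch assuming the auto-generated `tf.raw_ops.XlaKeyValueSort`
binding, run inside an XLA-compiled function):

``` python
@tf.function(jit_compile=True)
def key_value_sort(keys, values):
  return tf.raw_ops.XlaKeyValueSort(keys=keys, values=values)

key_value_sort(tf.constant([3, 1, 2]), tf.constant([30, 10, 20]))
# ==> sorted_keys   = [1, 2, 3]
#     sorted_values = [10, 20, 30]
```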
  }];

  let arguments = (ins
    Arg<TF_IntOrFpTensor, [{A `Tensor` of type K.}]>:$keys,
    Arg<TF_Tensor, [{A `Tensor` of type V.}]>:$values
  );

  let results = (outs
    Res<TF_IntOrFpTensor, [{A `Tensor` of type K.}]>:$sorted_keys,
    Res<TF_Tensor, [{A `Tensor` of type V.}]>:$sorted_values
  );

  TF_DerivedOperandTypeAttr V = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr K = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaPadOp : TF_Op<"XlaPad", [NoSideEffect]> {
  let summary = "Wraps the XLA Pad operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#pad
.
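
For example, combining low and interior padding on the second dimension (a
sketch assuming the auto-generated `tf.raw_ops.XlaPad` binding, run inside an
XLA-compiled function; the padding operands are described below):

``` python
@tf.function(jit_compile=True)
def pad(x):
  return tf.raw_ops.XlaPad(input=x, padding_value=tf.constant(0),
                           padding_low=tf.constant([0, 1]),
                           padding_high=tf.constant([0, 0]),
                           padding_interior=tf.constant([0, 1]))

pad(tf.constant([[1, 2], [3, 4]]))
# ==> [[0, 1, 0, 2],
#      [0, 3, 0, 4]]
```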
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input,
    Arg<TF_Tensor, [{A scalar `Tensor` of type T.}]>:$padding_value,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start of each input dimension. Must
be a compile-time constant 1D tensor of length equal to rank of input.}]>:$padding_low,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the end of each input dimension. Must
be a compile-time constant 1D tensor of length equal to rank of input.}]>:$padding_high,
    Arg<TF_I32OrI64Tensor, [{the padding to apply between each input element. Must
be a compile-time constant 1D tensor of length equal to rank of input,
containing only non-negative values.}]>:$padding_interior
  );

  let results = (outs
    Res<TF_Tensor, [{A `Tensor` of type T.}]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaRecvFromHostOp : TF_Op<"XlaRecvFromHost", []> {
  let summary = "An op to receive a tensor from the host.";

  let description = [{
output: the tensor that will be received from the host.
Toutput: element type for output.
shape: shape for output.
key: A unique identifier for this region used to match up host transfers.
  }];

  let arguments = (ins
    TF_ShapeAttr:$shape,
    StrAttr:$key
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedResultTypeAttr Toutput = TF_DerivedResultTypeAttr<0>;
}

def TF_XlaReduceOp : TF_Op<"XlaReduce", [NoSideEffect]> {
  let summary = "Wraps the XLA Reduce operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#reduce .
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$input,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a scalar representing the initial value for the reduction}]>:$init_value,

    I64ArrayAttr:$dimensions_to_reduce,
    SymbolRefAttr:$reducer
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaReduceWindowOp : TF_Op<"XlaReduceWindow", [NoSideEffect]> {
  let summary = "Wraps the XLA ReduceWindow operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$input,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a scalar representing the initial value for the reduction}]>:$init_value,
    Arg<TF_I32OrI64Tensor, [{the shape of the window}]>:$window_dimensions,
    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
    TF_I32OrI64Tensor:$base_dilations,
    TF_I32OrI64Tensor:$window_dilations,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,

    SymbolRefAttr:$computation
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaReplicaIdOp : TF_Op<"XlaReplicaId", [NoSideEffect, TF_NoConstantFold]> {
  let summary = "Replica ID.";

  let arguments = (ins);

  let results = (outs
    TF_Int32Tensor:$id
  );

  // Constant folding is disabled for this op as it is a runtime op and can't
  // be constant folded at compile time.
}

def TF_XlaScatterOp : TF_Op<"XlaScatter", [NoSideEffect]> {
  let summary = "Wraps the XLA Scatter operator documented at";

  let description = [{
https://www.tensorflow.org/xla/operation_semantics#scatter.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Array to be scattered into.}]>:$operand,
    Arg<TF_I32OrI64Tensor, [{Array containing the starting indices of the slices that must
be scattered to.}]>:$scatter_indices,
    Arg<TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Array containing the values that must be used for scattering.}]>:$updates,

    SymbolRefAttr:$update_computation,
    StrAttr:$dimension_numbers,
    BoolAttr:$indices_are_sorted
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Bool, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaSelectAndScatterOp : TF_Op<"XlaSelectAndScatter", [NoSideEffect]> {
  let summary = "Wraps the XLA SelectAndScatter operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#selectandscatter
.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor}]>:$operand,
    Arg<TF_I32OrI64Tensor, [{the shape of the window}]>:$window_dimensions,
    Arg<TF_I32OrI64Tensor, [{the inter-window strides}]>:$window_strides,
    Arg<TF_I32OrI64Tensor, [{the padding to apply at the start and end of each input dimension}]>:$padding,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a tensor of values to scatter}]>:$source,
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{a scalar representing the initial value for the output tensor}]>:$init_value,

    SymbolRefAttr:$select,
    SymbolRefAttr:$scatter
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  TF_DerivedOperandTypeAttr Tindices = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaSelfAdjointEigOp : TF_Op<"XlaSelfAdjointEig", [NoSideEffect]> {
  let summary = [{
Computes the eigen decomposition of a batch of self-adjoint matrices
  }];

  let description = [{
(Note: Only real inputs are supported).

Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in
tensor such that tensor[..., :, :] * v[..., :, i] = w[..., i] * v[..., :, i], for
i = 0...N-1.
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor.}]>:$a,

    BoolAttr:$lower,
    I64Attr:$max_iter,
    F32Attr:$epsilon
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The eigenvalues in ascending order, each repeated according to its
multiplicity.}]>:$w,
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{The column v[..., :, i] is the normalized eigenvector corresponding to the
eigenvalue w[..., i].}]>:$v
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaSendToHostOp : TF_Op<"XlaSendToHost", []> {
  let summary = "An op to send a tensor to the host.";

  let description = [{
input: the tensor that will be sent to the host.
Tinput: element type for input.
key: A unique identifier for this region used to match up host transfers.
  }];

  let arguments = (ins
    TF_Tensor:$input,

    StrAttr:$key
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr Tinput = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaSetDynamicDimensionSizeOp : TF_Op<"XlaSetDynamicDimensionSize", [DeclareOpInterfaceMethods<InferTypeOpInterface>, NoSideEffect, TF_NoConstantFold]> {
  let summary = "Make a static dimension into an XLA bounded dynamic dimension.";

  let description = [{
The current static dimension size will become the bound and the second
operand becomes the dynamic size of the dimension.
  }];

  let arguments = (ins
    TF_Tensor:$input,
    TF_Int32Tensor:$dim_index,
    TF_Int32Tensor:$size
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaSortOp : TF_Op<"XlaSort", [NoSideEffect]> {
  let summary = "Wraps the XLA Sort operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#sort
.

Sorts a tensor. Currently only sorts in ascending order are supported.
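
For example (a sketch assuming the auto-generated `tf.raw_ops.XlaSort`
binding, run inside an XLA-compiled function):

``` python
@tf.function(jit_compile=True)
def xla_sort(x):
  return tf.raw_ops.XlaSort(input=x)

xla_sort(tf.constant([3, 1, 2]))
# ==> [1, 2, 3]
```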
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{A `Tensor` of type T.}]>:$input
  );

  let results = (outs
    Res<TF_Tensor, [{A `Tensor` of type T.}]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaSvdOp : TF_Op<"XlaSvd", [NoSideEffect]> {
  let summary = [{
Computes the singular value decomposition of a batch of matrices
  }];

  let description = [{
(Note: Only real inputs are supported).

Computes the singular values and singular vectors of the innermost M-by-N matrices
in tensor such that tensor[..., :, :] = u[..., :, :] * Diag(s[..., :]) * Transpose(v[..., :, :]).
  }];

  let arguments = (ins
    Arg<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{the input tensor.}]>:$a,

    I64Attr:$max_iter,
    F32Attr:$epsilon,
    StrAttr:$precision_config
  );

  let results = (outs
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Singular values. The values are sorted in reverse order of magnitude, so
s[..., 0] is the largest value, s[..., 1] is the second largest, etc.}]>:$s,
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Left singular vectors.}]>:$u,
    Res<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>, [{Right singular vectors.}]>:$v
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlaVariadicReduceOp : TF_Op<"XlaVariadicReduce", [NoSideEffect, SameVariadicOperandSize]> {
  let summary = "Wraps the variadic XLA Reduce operator.";

  let description = [{
Semantics are documented at
 https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce.
  }];

  let arguments = (ins
    Arg<Variadic<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>>, [{the input tensor(s)}]>:$input,
    Arg<Variadic<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>>, [{scalar initial value(s) for the reduction}]>:$init_value,

    I64ArrayAttr:$dimensions_to_reduce,
    SymbolRefAttr:$reducer
  );

  let results = (outs
    Variadic<TensorOf<[TF_Bfloat16, TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF_XlaVariadicSortOp : TF_Op<"XlaVariadicSort", [NoSideEffect]> {
  let summary = "Wraps the XLA Sort operator, documented at";

  let description = [{
https://www.tensorflow.org/performance/xla/operation_semantics#sort
.

Sorts one or more tensors, with support for custom comparator, dimension, and
is_stable attributes.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{A list of `Tensor` of identical shape but possibly different types.}]>:$inputs,
    Arg<TF_Int32Tensor, [{The dimension along which to sort. Must be a compile-time constant.}]>:$dimension,

    SymbolRefAttr:$comparator,
    BoolAttr:$is_stable
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{A list of `Tensor` of same shape and types as the `input`.}]>:$outputs
  );

  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<0>;
}

def TF_Xlog1pyOp : TF_Op<"Xlog1py", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = "Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise.";

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$x,
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$y
  );

  let results = (outs
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_XlogyOp : TF_Op<"Xlogy", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                 WithBroadcastableBinOpBuilder {
  let summary = "Returns 0 if x == 0, and x * log(y) otherwise, elementwise.";

  let arguments = (ins
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$x,
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$y
  );

  let results = (outs
    TensorOf<[TF_Complex128, TF_Complex64, TF_Float16, TF_Float32, TF_Float64]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ZerosLikeOp : TF_Op<"ZerosLike", [Idempotent, NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Returns a tensor of zeros with the same shape and type as x.";

  let arguments = (ins
    Arg<TF_Tensor, [{a tensor of type T.}]>:$x
  );

  let results = (outs
    Res<TF_Tensor, [{a tensor of the same shape and type as x but filled with zeros.}]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF_ZetaOp : TF_Op<"Zeta", [NoSideEffect, ResultsBroadcastableShape]>,
                WithBroadcastableBinOpBuilder {
  let summary = [{
Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
  }];

  let description = [{
The Hurwitz zeta function is defined as:

\\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
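
For example, \\(\zeta(2, 1) = \pi^2/6\\) (an illustrative sketch via the
`tf.math.zeta` wrapper):

``` python
tf.math.zeta(x=tf.constant([2.0]), q=tf.constant([1.0]))
# ==> [1.6449341]  # pi**2 / 6
```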
  }];

  let arguments = (ins
    TF_F32OrF64Tensor:$x,
    TF_F32OrF64Tensor:$q
  );

  let results = (outs
    TF_F32OrF64Tensor:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}

def TF__ArrayToListOp : TF_Op<"_ArrayToList", [NoSideEffect]> {
  let summary = "Converts an array of tensors to a list of tensors.";

  let arguments = (ins
    Variadic<TF_Tensor>:$input
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultTypeListAttr out_types = TF_DerivedResultTypeListAttr<0>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}

def TF__FusedBatchNormExOp : TF_Op<"_FusedBatchNormEx", [NoSideEffect]> {
  let summary = "Internal FusedBatchNorm operation: reserved for internal use.";

  let description = [{
Do not invoke this operator directly in Python. A fusion optimization is
expected to create these operators.
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$x,
    TF_Float32Tensor:$scale,
    TF_Float32Tensor:$offset,
    TF_Float32Tensor:$mean,
    TF_Float32Tensor:$variance,
    Variadic<TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>>:$side_input,

    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
    DefaultValuedAttr<F32Attr, "1.0f">:$exponential_avg_factor,
    DefaultValuedAttr<StrAttr, "Identity">:$activation_mode,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<BoolAttr, "true">:$is_training
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32]>:$y,
    TF_Float32Tensor:$batch_mean,
    TF_Float32Tensor:$batch_variance,
    TF_Float32Tensor:$reserve_space_1,
    TF_Float32Tensor:$reserve_space_2,
    TF_Float32Tensor:$reserve_space_3
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeAttr U = TF_DerivedOperandTypeAttr<1>;
  TF_DerivedOperandSizeAttr num_side_inputs = TF_DerivedOperandSizeAttr<5>;
}

def TF__FusedConv2DOp : TF_Op<"_FusedConv2D", [NoSideEffect]> {
  let summary = [{
Performs a convolution followed by a specified series of operations.
  }];

  let description = [{
The inputs to the convolution are `input` and `filter`. The series of operations
that follows is specified by the `fused_ops` attribute, which is a list of TF op
names specified as strings (e.g. "Relu"). They are performed in order, where the
(first) input to each op is the output of the preceding op. The first input and
the output of each fused_op must be of type T.

Currently supported fused_op combinations are: [X] and [X,A], where X is one of
{"BiasAdd","FusedBatchNorm"} and A is one of {"Elu","Relu","Relu6"}.

* The first input to op X is the Conv2D result, and the additional input(s) to X
are specified by `args`.
* If there is an op A specified, the output of op X is the input to op A, and op
A produces the _FusedConv2D output. Otherwise, op X produces the _FusedConv2D
output.

*NOTE*: Do not invoke this operator directly in Python. Grappler is expected to
create these operators.
  }];

  let arguments = (ins
    TF_F32OrF64Tensor:$input,
    TF_F32OrF64Tensor:$filter,
    Variadic<TF_F32OrF64Tensor>:$args,

    I64ArrayAttr:$strides,
    TF_AnyStrAttrOf<["SAME", "VALID", "EXPLICIT"]>:$padding,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$explicit_paddings,
    DefaultValuedAttr<TF_ConvnetDataFormatAttr, "NHWC">:$data_format,
    DefaultValuedAttr<I64ArrayAttr, "{1, 1, 1, 1}">:$dilations,
    DefaultValuedAttr<BoolAttr, "true">:$use_cudnn_on_gpu,
    DefaultValuedAttr<StrArrayAttr, "{}">:$fused_ops,
    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
    DefaultValuedAttr<F32Attr, "0.2f">:$leakyrelu_alpha
  );

  let results = (outs
    TF_F32OrF64Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandSizeAttr num_args = TF_DerivedOperandSizeAttr<2>;
}

def TF__FusedMatMulOp : TF_Op<"_FusedMatMul", [NoSideEffect, TF_SameOperandsAndResultElementTypeResolveRef]> {
  let summary = [{
Performs a MatMul followed by a specified series of operations.
  }];

  let description = [{
The inputs to the MatMul are specified by `a` and `b`. The series of operations
that follows is specified by the `fused_ops` attribute, which is a list of TF op
names specified as strings (e.g. "Relu"). They are performed in order, where the
(first) input to each op is the output of the preceding op. The first input and
the output of each fused_op must be of type T.

Currently supported fused_op combinations are: ["BiasAdd"] and ["BiasAdd",A],
where A is one of {"Elu","Relu","Relu6"}.

* The first input to BiasAdd is the MatMul result, and the additional BiasAdd
input is specified by `args`.
* If there is an op A specified, the output of the BiasAdd is the input to op A,
and op A produces the _FusedMatMul output. Otherwise, the BiasAdd produces the
_FusedMatMul output.
18396
18397*NOTE*: Do not invoke this operator directly in Python. Grappler is
18398expected to create these operators.
18399  }];
18400
18401  let arguments = (ins
18402    TensorOf<[TF_Bfloat16, TF_Float32]>:$a,
18403    TensorOf<[TF_Bfloat16, TF_Float32]>:$b,
18404    Variadic<TensorOf<[TF_Bfloat16, TF_Float32]>>:$args,
18405
18406    DefaultValuedAttr<BoolAttr, "false">:$transpose_a,
18407    DefaultValuedAttr<BoolAttr, "false">:$transpose_b,
18408    DefaultValuedAttr<StrArrayAttr, "{}">:$fused_ops,
18409    DefaultValuedAttr<F32Attr, "0.0001f">:$epsilon,
18410    DefaultValuedAttr<F32Attr, "0.2f">:$leakyrelu_alpha
18411  );
18412
18413  let results = (outs
18414    TensorOf<[TF_Bfloat16, TF_Float32]>:$product
18415  );
18416
18417  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
18418  TF_DerivedOperandSizeAttr num_args = TF_DerivedOperandSizeAttr<2>;
18419}

def TF__HostRecvOp : TF_Op<"_HostRecv", []> {
  let summary = "Receives the named tensor from send_device on recv_device.";

  let description = [{
_HostRecv produces its output on host memory whereas _Recv produces its
output on device memory.
  }];

  let arguments = (ins
    StrAttr:$tensor_name,
    StrAttr:$send_device,
    I64Attr:$send_device_incarnation,
    StrAttr:$recv_device,
    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
  );

  let results = (outs
    Res<TF_Tensor, [{The tensor to receive.}]>:$tensor
  );

  TF_DerivedResultTypeAttr tensor_type = TF_DerivedResultTypeAttr<0>;
}

def TF__HostSendOp : TF_Op<"_HostSend", []> {
  let summary = "Sends the named tensor from send_device to recv_device.";

  let description = [{
_HostSend requires its input on host memory whereas _Send requires its
input on device memory.
  }];

  let arguments = (ins
    Arg<TF_Tensor, [{The tensor to send.}]>:$tensor,

    StrAttr:$tensor_name,
    StrAttr:$send_device,
    I64Attr:$send_device_incarnation,
    StrAttr:$recv_device,
    DefaultValuedAttr<BoolAttr, "false">:$client_terminated
  );

  let results = (outs);

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
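
// A hedged sketch of how a _HostSend/_HostRecv pair is matched: both ops must
// agree on `tensor_name`, `send_device`, `send_device_incarnation`, and
// `recv_device`, which the runtime folds into a single rendezvous key. The key
// layout below is an assumption for illustration, not a stable contract.
//
//   def rendezvous_key(send_device, incarnation, recv_device, tensor_name):
//     # Frame/iteration is fixed at "0:0" for transfers outside control flow.
//     return f"{send_device};{incarnation:x};{recv_device};{tensor_name};0:0"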

def TF__ListToArrayOp : TF_Op<"_ListToArray", [NoSideEffect]> {
  let summary = "Converts a list of tensors to an array of tensors.";

  let arguments = (ins
    Variadic<TF_Tensor>:$input
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  TF_DerivedResultTypeAttr T = TF_DerivedResultTypeAttr<0>;
  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultSizeAttr N = TF_DerivedResultSizeAttr<0>;
}

def TF__RecvTPUEmbeddingActivationsOp : TF_Op<"_RecvTPUEmbeddingActivations", [TF_TPUEmbeddingSideEffect]> {
  let summary = "An op that receives embedding activations on the TPU.";

  let description = [{
The TPU system performs the embedding lookups and aggregations. The results of
these aggregations are visible to the TensorFlow graph as the outputs of a
_RecvTPUEmbeddingActivations op. This op returns a list containing one
Tensor of activations per table specified in the model.
  }];

  let arguments = (ins
    Arg<TF_VariantTensor, [{A Tensor with type=DT_VARIANT containing the deduplication
data. The tensor is an XLA nested tuple containing N elements (where N is
the ratio of embedding cores to tensor cores per TPU chip). Each
element of the nested tuple is a tuple of rank 1 tensors. Each tensor either
contains indices (DT_UINT32) for embedding lookup on the TensorCore or
weights (DT_FLOAT) to apply to the output of the embedding lookup operation.}]>:$deduplication_data,

    StrAttr:$config
  );

  let results = (outs
    Res<Variadic<TF_Float32Tensor>, [{A TensorList of embedding activations containing one Tensor per
embedding table in the model.}]>:$outputs
  );

  TF_DerivedResultSizeAttr num_tables = TF_DerivedResultSizeAttr<0>;
}

def TF__TPUCompileMlirOp : TF_Op<"_TPUCompileMlir", []> {
  let summary = [{
Compiles a computation for execution on one or more TPU devices.
  }];

  let description = [{
For the internal use of the distributed TPU compiler.

'mlir_module' is a serialized MLIR module with a `main` function that contains
the target computation.
'dynamic_shapes' contains dynamic shapes of arguments whose shapes were not
known statically at TPUReplication rewrite time.
'metadata' is a serialized TPUCompileMetadataProto describing the shapes and
types of the inputs to the computation, as well as a mapping onto the TPU pod
topology.
'program' output is a string key that is passed to the TPUExecute op and used to
look up the program in the compilation cache.
  }];

  let arguments = (ins
    Variadic<TF_Int64Tensor>:$dynamic_shapes,

    StrAttr:$mlir_module,
    StrAttr:$metadata
  );

  let results = (outs
    TF_StrTensor:$compilation_status,
    Variadic<TF_StrTensor>:$program
  );

  TF_DerivedResultSizeAttr num_computations = TF_DerivedResultSizeAttr<1>;
  TF_DerivedOperandSizeAttr NumDynamicShapes = TF_DerivedOperandSizeAttr<0>;
}

def TF__TPUCompileMlirPlaceholderProgramKeyOp : TF_Op<"_TPUCompileMlirPlaceholderProgramKey", []> {
  let summary = [{
Placeholder program key (compilation cache key) of a _TPUCompileMlir `program`.
  }];

  let description = [{
This op can be used when certain rewrite passes materialize ops that require a
program key but the _TPUCompileMlir op has not been added yet. Subsequent
rewrite passes must replace this op with a _TPUCompileMlir op `program` output.
  }];

  let arguments = (ins);

  let results = (outs
    TF_StrTensor:$program
  );
}

def TF__UnaryOpsCompositionOp : TF_Op<"_UnaryOpsComposition", [NoSideEffect, TF_SameOperandsAndResultTypeResolveRef]> {
  let summary = [{
Applies the sequence of unary ops named in `op_names`, in order, to `x`.
  }];

  let description = [{
*NOTE*: Do not invoke this operator directly in Python. A graph rewrite pass is
expected to create these operators.
  }];

  let arguments = (ins
    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$x,

    StrArrayAttr:$op_names
  );

  let results = (outs
    TensorOf<[TF_Float16, TF_Float32, TF_Float64]>:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
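
// A hedged sketch of the semantics: _UnaryOpsComposition applies the ops named
// in `op_names` left to right, so op_names = ["Abs", "Tanh"] behaves like the
// Python below. The particular ops chosen here are an illustrative assumption;
// the kernel supports only a fixed set of unary ops.
//
//   import tensorflow as tf
//
//   def unary_composition_abs_tanh(x):
//     # y = Tanh(Abs(x)): each op consumes the preceding op's output.
//     return tf.tanh(tf.abs(x))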

def TF__XlaHostComputeMlirOp : TF_Op<"_XlaHostComputeMlir", []> {
  let summary = [{
A pseudo-op to represent host-side computation in an XLA program.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the host.}]>:$inputs,

    StrAttr:$send_key,
    StrAttr:$recv_key,
    DefaultValuedAttr<I64Attr, "0">:$tpu_core
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{A list of tensors that will be returned to the device.}]>:$outputs
  );

  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
}

def TF__XlaRecvAtHostOp : TF_Op<"_XlaRecvAtHost", []> {
  let summary = [{
A placeholder op to receive values from a running XLA computation.
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
execution the transfer corresponds to.}]>:$dynamic_key,

    StrAttr:$key,
    I64Attr:$device_ordinal
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{A list of tensors that will be received from the XLA computation.}]>:$outputs
  );

  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
}

def TF__XlaRecvAtHostV2Op : TF_Op<"_XlaRecvAtHostV2", []> {
  let summary = [{
A placeholder op to receive values from a running XLA computation with support for a runtime device ordinal.
  }];

  let arguments = (ins
    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
execution the transfer corresponds to.}]>:$dynamic_key,
    Arg<TF_Int64Tensor, [{The device id relative to the associated host device.}]>:$device_ordinal,

    StrAttr:$key
  );

  let results = (outs
    Res<Variadic<TF_Tensor>, [{A list of tensors that will be received from the XLA computation.}]>:$outputs
  );

  TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
}

def TF__XlaSendFromHostOp : TF_Op<"_XlaSendFromHost", []> {
  let summary = "A placeholder op to send values to a running XLA computation.";

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the XLA computation.}]>:$inputs,
    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
execution the transfer corresponds to.}]>:$dynamic_key,

    StrAttr:$key,
    I64Attr:$device_ordinal
  );

  let results = (outs);

  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
}

def TF__XlaSendFromHostV2Op : TF_Op<"_XlaSendFromHostV2", []> {
  let summary = [{
A placeholder op to send values to a running XLA computation with support for a runtime device ordinal.
  }];

  let arguments = (ins
    Arg<Variadic<TF_Tensor>, [{A list of tensors that will be sent to the XLA computation.}]>:$inputs,
    Arg<TF_StrTensor, [{The key sent at runtime by the compile node to identify which
execution the transfer corresponds to.}]>:$dynamic_key,
    Arg<TF_Int64Tensor, [{The device id relative to the associated host device.}]>:$device_ordinal,

    StrAttr:$key
  );

  let results = (outs);

  TF_DerivedOperandTypeListAttr Tinputs = TF_DerivedOperandTypeListAttr<0>;
}
