• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
3Licensed under the Apache License, Version 2.0 (the "License");
4you may not use this file except in compliance with the License.
5You may obtain a copy of the License at
6
7    http://www.apache.org/licenses/LICENSE-2.0
8
9Unless required by applicable law or agreed to in writing, software
10distributed under the License is distributed on an "AS IS" BASIS,
11WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12See the License for the specific language governing permissions and
13limitations under the License.
14==============================================================================*/
15
16// This is the operation definition file for TensorFlow.
17//
18// This file contains TensorFlow ops whose definitions are amended to fix
19// issues or provide more information. In this file you have full control
20// of the op definition; all changes will be retained with subsequent
21// refreshes.
22//
23// This file includes another file, `tf_generated_ops.td`, which contains
24// all ops whose definitions are generated from TensorFlow codebase.
25// Changes made there are not respected.
26
27#ifndef TF_OPS
28#define TF_OPS
29
30include "tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td"
31include "tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td"
32include "mlir/Interfaces/CallInterfaces.td"
33include "mlir/Interfaces/ControlFlowInterfaces.td"
34include "mlir/Interfaces/InferTypeOpInterface.td"
35include "mlir/Interfaces/LoopLikeInterface.td"
36include "mlir/Interfaces/SideEffectInterfaces.td"
37include "mlir/IR/OpBase.td"
38
// Base class for ops that create a new TensorList. The list handle is a
// variant tensor whose single subtype records the type of the list elements.
class TF_TensorListInitOp<string mnemonic> : TF_Op<mnemonic, [NoSideEffect]> {
  let results = (outs
    TF_VariantTensor:$handle
  );

  // `shape_type` is derived from operand 0 (the `element_shape` operand that
  // subclasses such as EmptyTensorList/TensorListReserve declare first).
  TF_DerivedOperandTypeAttr shape_type = TF_DerivedOperandTypeAttr<0>;

  let verifier = [{
    // This is required to populate derived attributes during export in a
    // meaningful way. Else during export to GraphDef element_type() query
    // will result in out of bounds access/assert.
    if (handle_dtype().getSubtypes().size() != 1) {
      return emitOpError(
          "must have exactly one subtype in the result variant type");
    }

    return Verify(*this);
  }];

  // `element_dtype` is derived from the subtype carried by the result's
  // variant type (see element_type()/handle_dtype() below).
  DerivedTypeAttr element_dtype = DerivedTypeAttr<
      "return getElementTypeOrSelf(element_type());">;

  let extraClassDeclaration = [{
    // Returns type of the TensorList element produced by this op.
    TensorType element_type() { return handle_dtype().getSubtypes()[0]; }

    // Returns data type of the result handle. Returned type contains type of
    // the TensorList element as a subtype.
    VariantType handle_dtype() {
      return getElementTypeOrSelf(handle().getType()).cast<TF::VariantType>();
    }
  }];
}
72
def TF_CaseOp : TF_Op<"Case", []> {
  let summary = [{
An n-way switch statement which calls a single branch function.
  }];

  let description = [{
An n-way switch statement, implementing the following:
    ```
    switch (branch_index) {
      case 0:
        output = branches[0](input);
        break;
      case 1:
        output = branches[1](input);
        break;
      ...
      case [[nbranches-1]]:
      default:
        output = branches[nbranches-1](input);
        break;
    }
    ```
  }];

  let arguments = (ins
    I32Tensor:$branch_index,
    Variadic<TF_Tensor>:$input,

    Confined<SymbolRefArrayAttr, [ArrayMinCount<1>]>:$branches,

    // Used to map StatelessCase and Case op defined in TensorFlow to a common
    // op.
    BoolAttr:$is_stateless
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  // GraphDef attrs Tin/Tout/output_shapes are derived from the variadic
  // operand and result lists instead of being stored explicitly.
  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<1>;
  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;

  let hasCanonicalizer = 1;

  let verifier = [{
    return Verify(*this);
  }];

  let extraClassDeclaration = [{
    int num_branches() { return branches().size(); }

    // Gets function corresponding branch # `index`.
    FuncOp branch_function(int index) {
      auto flat_sym_ref = branches()[index].cast<FlatSymbolRefAttr>();
      return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, flat_sym_ref);
    }

    // Gets all branch functions.
    void get_branch_functions(SmallVectorImpl<FuncOp> &functions) {
      functions.reserve(num_branches());
      for (int idx : llvm::seq<int>(0, num_branches()))
        functions.push_back(branch_function(idx));
    }
  }];
}
140
// Region-based form of tf.Case: branch bodies are inline regions rather than
// symbol references to functions.
def TF_CaseRegionOp : TF_Op<"CaseRegion",
      [SingleBlockImplicitTerminator<"YieldOp">, NoRegionArguments]> {
  let summary = [{
An n-way switch statement which calls a single branch function.
  }];

  let description = [{
An n-way switch statement, implementing the following:
    ```
    switch (branch_index) {
      case 0:
        output = branches[0](input);
        break;
      case 1:
        output = branches[1](input);
        break;
      ...
      case [[nbranches-1]]:
      default:
        output = branches[nbranches-1](input);
        break;
    }
    ```
  }];

  let arguments = (ins
    I32Tensor:$branch_index,

    // Used to map StatelessCase and Case op defined in TensorFlow to a common
    // op.
    BoolAttr:$is_stateless
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  // One single-block region per branch; each block is terminated by the
  // implicit tf.Yield that produces this op's results.
  let regions = (region VariadicRegion<SizedRegion<1>>:$branches);

  let verifier = [{
    return Verify(*this);
  }];

  let hasCanonicalizer = 1;

}
187
// In MLIR, the TensorFlow tensor value is represented as an ElementsAttr, with
// its type encoding the tensor's shape and data type.
def TF_ConstOp : TF_Op<"Const", [ConstantLike, NoSideEffect,
    DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
  let summary = "Constant tensor op";

  let arguments = (ins
    ElementsAttr:$value
  );

  let results = (outs
    TF_Tensor:$output
  );

  // GraphDef `dtype` attr is derived from the result type.
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;

  let builders = [
    // Build with the result type taken from `value`'s type.
    OpBuilderDAG<(ins "Attribute":$value)>,
    // Build with an explicitly supplied result `type`.
    OpBuilderDAG<(ins "Type":$type, "Attribute":$value)>,
  ];

  let hasFolder = 1;

  let extraClassDeclaration = [{
    // Inferred and declared result types only need to be broadcast
    // compatible, not strictly equal.
    static bool isCompatibleReturnTypes(ArrayRef<Type> l, ArrayRef<Type> r) {
      return BroadcastCompatible(l, r);
    }
  }];
}
217
def TF_CollectivePermuteOp : TF_Op<"CollectivePermute", []> {
  let summary = "An Op to permute tensors across replicated TPU instances.";

  let description = [{
Each instance supplies its own input.

For example, suppose there are 4 TPU instances: `[A, B, C, D]`. Passing
source_target_pairs=`[[0,1],[1,2],[2,3],[3,0]]` gets the outputs:
`[D, A, B, C]`.
  }];

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Complex128, TF_Complex64, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$input,
    I32Tensor:$source_target_pairs
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Float64, TF_Int16, TF_Int32, TF_Int64, TF_Int8, TF_Complex128, TF_Complex64, TF_Qint32, TF_Qint8, TF_Quint8, TF_Uint16, TF_Uint32, TF_Uint64, TF_Uint8]>:$output
  );

  // GraphDef `T` (element type) attr is derived from operand 0 (`input`).
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
240
// Note: TF_AllTypesMatch constrains `input` and `output` to have identical
// types.
def TF_XlaAllReduceOp : TF_Op<"XlaAllReduce", [NoSideEffect, TF_AllTypesMatch<["input", "output"]>]> {
  let summary = "An Op to reduce inputs across replicated TPU instances.";

  let arguments = (ins
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Int32, TF_Uint32]>:$input,
    TF_Int32Tensor:$group_assignment,
    // Reduction to apply across replicas.
    TF_AnyStrAttrOf<["Min", "Max", "Mul", "Add", "Mean"]>:$reduce_op
  );

  let results = (outs
    TensorOf<[TF_Bfloat16, TF_Float16, TF_Float32, TF_Int32, TF_Uint32]>:$output
  );

  // GraphDef `T` (element type) attr is derived from operand 0 (`input`).
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
256
// Result handle, derived attrs, and the variant-subtype verifier come from
// the TF_TensorListInitOp base class above.
def TF_EmptyTensorListOp : TF_TensorListInitOp<"EmptyTensorList"> {
  let summary = "Creates and returns an empty tensor list.";

  let description = [{
All list elements must be tensors of dtype element_dtype and shape compatible
with element_shape.

handle: an empty tensor list.
element_dtype: the type of elements in the list.
element_shape: a shape compatible with that of elements in the list.
  }];

  let arguments = (ins
    TF_I32OrI64Tensor:$element_shape,
    TF_Int32Tensor:$max_num_elements
  );
}
274
// TODO(fengliuai): The tf.Identity is side-effect free and it doesn't change
// the status of the system during the execution. However it shouldn't be folded
// in general if it used to serve for caching and some other invariant checks,
// so we removed the side-effect free property in the op definition. This is a
// hack, and we should fix it if we have a better way to model it.
def TF_IdentityOp : TF_Op<"Identity", [TF_OperandsSameAsResultsTypeOrRef]> {
  let summary = "Identity op";

  let description = [{
Returns a tensor with the same shape and contents as input.
  }];

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    TF_Tensor:$output
  );

  // GraphDef `T` attr is derived from the result type (index 0).
  TF_DerivedResultTypeAttr T = TF_DerivedResultTypeAttr<0>;
}
297
// Functional form of conditional: branches are symbol references to
// functions (compare TF_IfRegionOp, which inlines them as regions).
def TF_IfOp : TF_Op<"If", []> {
  let summary = "output = cond ? then_branch(input) : else_branch(input)";

  let description = [{
output = cond ? then_branch(input) : else_branch(input)

cond: A Tensor. If the tensor is a scalar of non-boolean type, the
    scalar is converted to a boolean according to the
    following rule: if the scalar is a numerical value, non-zero means
    True and zero means False; if the scalar is a string, non-empty
    means True and empty means False. If the tensor is not a scalar,
    being empty means False and being non-empty means True.
input: A list of input tensors.
then_branch: A function that takes 'inputs' and returns a list of
    tensors, whose types are the same as what else_branch returns.
else_branch: A function that takes 'inputs' and returns a list of
    tensors.  whose types are the same as what then_branch returns.
  }];

  let arguments = (ins
    TF_Tensor:$cond,
    Variadic<TF_Tensor>:$input,

    FlatSymbolRefAttr:$then_branch,
    FlatSymbolRefAttr:$else_branch,

    // Used to map StatelessIf and If op defined in TensorFlow to a common op.
    BoolAttr:$is_stateless
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  // GraphDef attrs derived from the operand/result types.
  TF_DerivedOperandTypeAttr Tcond = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<1>;
  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let hasCanonicalizer = 1;

  let extraClassDeclaration = [{
    // Get the then branch function.
    FuncOp then_function() {
     return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, then_branch());
    }

    // Get the else branch function.
    FuncOp else_function() {
     return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, else_branch());
    }
  }];
}
355
def TF_YieldOp : TF_Op<"Yield",
      [NoSideEffect, ReturnLike, Terminator,
       ParentOneOf<["CaseRegionOp", "IfRegionOp", "WhileRegionOp"]>]> {
  let summary = "Yield operation";

  let description = [{
    The "yield" operation represents a return operation within the conditional
    and body of structured control flow (e.g., if and while). The operation
    takes a variable number of operands and produces no results. The number and
    types of inputs must match the signature of the operation that contains the
    region.
  }];

  // Any number of values of any type; the parent region-based control-flow op
  // (see ParentOneOf above) defines what these operands must match.
  let arguments = (ins Variadic<AnyType>:$operands);
}
371
// Region-based form of tf.If: then/else bodies are inline regions rather than
// symbol references to functions.
def TF_IfRegionOp : TF_Op<"IfRegion",
      [SingleBlockImplicitTerminator<"YieldOp">, NoRegionArguments]> {
  let summary = "output = cond ? then_branch output : else_branch output";

  let description = [{
"output = cond ? then_branch output : else_branch output"

cond: A Tensor. If the tensor is a scalar of non-boolean type, the
    scalar is converted to a boolean according to the
    following rule: if the scalar is a numerical value, non-zero means
    True and zero means False; if the scalar is a string, non-empty
    means True and empty means False. If the tensor is not a scalar,
    being empty means False and being non-empty means True.
then_branch: A region that computes the outputs of the op if cond = true.
    It returns a list of tensors using tf.yield (as the terminator). The
    types of these returned tensors is same as that of the else_branch
else_branch: A region that computes the outputs of the op if cond = false.
    It returns a list of tensors using tf.yield (as the terminator). The
    types of these returned tensors is same as that of the then_branch
  }];

  let arguments = (ins
    // 0-D (scalar) i1 condition tensor.
    0DTensorOf<[I1]>:$cond,

    // Used to map StatelessIf and If op defined in TensorFlow to a common op.
    BoolAttr:$is_stateless,
    // Used to maintain function name when round-tripping
    // between functional and regional control flow.  This can be removed if
    // the runtime does not require globally unique then/else branch function names.
    OptionalAttr<StrAttr>:$_then_func_name,
    OptionalAttr<StrAttr>:$_else_func_name
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  let regions = (region SizedRegion<1>:$then_branch, SizedRegion<1>:$else_branch);

  let verifier = [{
    return Verify(*this);
  }];

  let builders = [
    OpBuilderDAG<(ins "TypeRange":$resultTypes, "ValueRange":$operands,
      "llvm::ArrayRef<::mlir::NamedAttribute>":$attributes,
      "unsigned":$numRegions),
    [{
      // Regions count is fixed at two (then/else); the generic builder's
      // numRegions argument is only sanity-checked here.
      assert(numRegions == 2u && "mismatched number of regions");
      build($_builder, $_state, resultTypes, operands, attributes);
    }]>];

  let hasCanonicalizer = 1;
}
426
def TF_LegacyCallOp : TF_Op<"LegacyCall",
                            [CallOpInterface, NoSideEffect]> {
  let summary =
    "returns `f(inputs)`, where `f` is a function.";

  let description = [{
    The LegacyCall operation represents a direct call to a function that is
    within the same symbol scope as the call and is mapped to a GraphDef node
    with the function name as the op name. Unlike a PartitionedCall which
    represents asynchronously executing a function across multiple devices, a
    LegacyCall ignores specification for ops in the attached function and
    instead executes it on the device assigned to this op.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$args,

    FlatSymbolRefAttr:$f,
    DefaultValuedAttr<BoolAttr, "false">:$_disable_call_shape_inference
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  let extraClassDeclaration = [{
    // Gets the argument operands to the called function.
    operand_range getArgOperands() { return args(); }

    // Returns the callee of this operation (CallOpInterface).
    CallInterfaceCallable getCallableForCallee() { return fAttr(); }

    // Resolves `f` to its FuncOp in the nearest enclosing symbol table.
    FuncOp func() {
      return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, f());
    }
  }];
}
465
def TF_ParseExampleOp : TF_Op<"ParseExample",
                               [NoSideEffect,
                                AttrSizedResultSegments,
                                AttrSizedOperandSegments]> {

  let summary =
    "Transforms a vector of tf.Example protos (as strings) into typed tensors.";

  let arguments = (ins
    TF_StrTensor:$serialized,
    TF_StrTensor:$names,
    Variadic<TF_StrTensor>:$sparse_keys,
    Variadic<TF_StrTensor>:$dense_keys,
    Variadic<TensorOf<[TF_Float32, TF_Int64, TF_Str]>>:$dense_defaults,

    TF_ShapeAttrArray:$dense_shapes,
    // Segment sizes required by AttrSizedResultSegments /
    // AttrSizedOperandSegments to split the variadic lists above.
    I32ElementsAttr:$result_segment_sizes,
    I32ElementsAttr:$operand_segment_sizes
  );

  let results = (outs
    Variadic<TF_Int64Tensor>:$sparse_indices,                           // len(sparse_types)
    Variadic<TensorOf<[TF_Float32, TF_Int64, TF_Str]>>:$sparse_values,  // len(sparse_types)
    Variadic<TF_Int64Tensor>:$sparse_shapes,                            // len(sparse_types)
    Variadic<TensorOf<[TF_Float32, TF_Int64, TF_Str]>>:$dense_values    // len(Tdense)
  );

  // Derived sizes/types index the operand list: 2 = sparse_keys,
  // 3 = dense_keys, 4 = dense_defaults; result list: 1 = sparse_values.
  TF_DerivedOperandSizeAttr Nsparse = TF_DerivedOperandSizeAttr<2>;
  TF_DerivedOperandSizeAttr Ndense = TF_DerivedOperandSizeAttr<3>;
  TF_DerivedOperandTypeListAttr Tdense = TF_DerivedOperandTypeListAttr<4>;
  TF_DerivedResultTypeListAttr sparse_types = TF_DerivedResultTypeListAttr<1>;

  // No custom verifier is attached for this op.
  let verifier = ?;
}
500
def TF_ParseExampleV2Op : TF_Op<"ParseExampleV2",
                                [NoSideEffect,
                                 AttrSizedResultSegments]> {

  let summary =
    "Transforms a vector of tf.Example protos (as strings) into typed tensors.";

  let arguments = (ins
    TF_StrTensor:$serialized,
    TF_StrTensor:$names,
    TF_StrTensor:$sparse_keys,
    TF_StrTensor:$dense_keys,
    TF_StrTensor:$ragged_keys,
    Variadic<TensorOf<[TF_Float32, TF_Int64, TF_Str]>>:$dense_defaults,

    Confined<I64Attr, [IntMinValue<0>]>:$num_sparse,
    TF_ShapeAttrArray:$dense_shapes,
    // Segment sizes required by AttrSizedResultSegments to split the
    // variadic result lists below.
    I32ElementsAttr:$result_segment_sizes
  );

  let results = (outs
    Variadic<TF_Int64Tensor>:$sparse_indices,                           // len(sparse_types)
    Variadic<TensorOf<[TF_Float32, TF_Int64, TF_Str]>>:$sparse_values,  // len(sparse_types)
    Variadic<TF_Int64Tensor>:$sparse_shapes,                            // len(sparse_types)
    Variadic<TensorOf<[TF_Float32, TF_Int64, TF_Str]>>:$dense_values,   // len(Tdense)
    Variadic<TensorOf<[TF_Float32, TF_Int64, TF_Str]>>:$ragged_values,  // len(ragged_value_types)
                                                            //     = len(ragged_split_types)
    Variadic<TensorOf<[TF_Int32, TF_Int64]>>:$ragged_row_splits         // len(ragged_split_types)
                                                            //     = len(ragged_value_types)
  );

  // The Verify(ParseExampleV2Op) function validates that the lengths and types
  // of these attrs are compatible.
  TF_DerivedOperandTypeListAttr Tdense = TF_DerivedOperandTypeListAttr<5>;
  TF_DerivedResultTypeListAttr sparse_types = TF_DerivedResultTypeListAttr<1>;
  TF_DerivedResultTypeListAttr ragged_value_types =
    TF_DerivedResultTypeListAttr<4>;
  TF_DerivedResultTypeListAttr ragged_split_types =
    TF_DerivedResultTypeListAttr<5>;

  let verifier = [{
    return Verify(*this);
  }];
}
545
def TF_PartitionedCallOp : TF_Op<"PartitionedCall",
                                 [CallOpInterface, NoSideEffect]> {
  let summary =
    "returns `f(inputs)`, where `f`'s body is placed and partitioned.";

  let description = [{
Asynchronously executes a function, potentially across multiple devices but
within a single process. The kernel places and partitions a given function's
underlying graph, and executes each of the partitioned subgraphs as a function.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$args,

    SymbolRefAttr:$f,
    StrAttr:$config,
    StrAttr:$config_proto,
    StrAttr:$executor_type
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  // GraphDef Tin/Tout attrs are derived from the operand/result types.
  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;

  let extraClassDeclaration = [{
    // Gets the argument operands to the called function.
    operand_range getArgOperands() { return args(); }

    // Returns the callee of this operation (CallOpInterface).
    CallInterfaceCallable getCallableForCallee() { return fAttr(); }

    // Resolves `f` to its FuncOp in the nearest enclosing symbol table.
    FuncOp func() {
      return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, f());
    }
  }];

  // Shares verification with TF_StatefulPartitionedCallOp.
  let verifier = [{ return VerifyPartitionedCall(*this); }];
}
588
def TF_PlaceholderOp : TF_Op<"Placeholder", [NoSideEffect]> {
  let summary = "Placeholder op";

  let description = [{
Inserts a placeholder for a tensor that will be always fed.
  }];

  // No operands: the value is supplied externally via feeds.
  let arguments = (ins
  );

  let results = (outs
    TF_Tensor:$output
  );

  // GraphDef `dtype` attr is derived from the result type.
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}
605
def TF_PlaceholderWithDefaultOp : TF_Op<"PlaceholderWithDefault", [NoSideEffect]> {
  // Summary previously duplicated TF_PlaceholderOp's ("Placeholder op");
  // reworded to reflect the pass-through-default behavior described below.
  let summary = "Placeholder op that passes through `input` when not fed";

  let description = [{
    A placeholder op that passes through input when its output is not fed.
  }];

  let arguments = (ins
    // Default value to forward when the output is not fed.
    TF_Tensor:$input
  );

  let results = (outs
    TF_Tensor:$output
  );

  // GraphDef `dtype` and `shape` attrs are derived from the result type.
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
  DerivedAttr shape = TF_DerivedResultShapeAttr;
}
624
// Stateful variant of TF_PartitionedCallOp: same structure but without the
// NoSideEffect trait.
def TF_StatefulPartitionedCallOp : TF_Op<"StatefulPartitionedCall",
                                         [CallOpInterface]> {
  let summary =
    "returns `f(inputs)`, where `f`'s body is placed and partitioned.";

  let description = [{
Asynchronously executes a function, potentially across multiple devices but
within a single process. The kernel places and partitions a given function's
underlying graph, and executes each of the partitioned subgraphs as a function.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$args,

    FlatSymbolRefAttr:$f,
    StrAttr:$config,
    StrAttr:$config_proto,
    StrAttr:$executor_type
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  // GraphDef Tin/Tout attrs are derived from the operand/result types.
  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;

  let extraClassDeclaration = [{
    // Gets the argument operands to the called function.
    operand_range getArgOperands() { return args(); }

    // Returns the callee of this operation (CallOpInterface).
    CallInterfaceCallable getCallableForCallee() { return fAttr(); }

    // Resolves `f` to its FuncOp in the nearest enclosing symbol table.
    FuncOp func() {
      return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, f());
    }
  }];

  // Shares verification with TF_PartitionedCallOp.
  let verifier = [{ return VerifyPartitionedCall(*this); }];
}
667
// Functional form of the while loop: cond/body are symbol references to
// functions (compare TF_WhileRegionOp, which inlines them as regions).
def TF_WhileOp : TF_Op<"While", []> {
  let summary = [{
output = input; While (Cond(output)) { output = Body(output) }
  }];

  let description = [{
output = input; While (Cond(output)) { output = Body(output) }

input: A list of input tensors whose types are T.
output: A list of output tensors whose types are T.
cond: A function that takes 'input' and returns a tensor.  If the tensor is
    a scalar of non-boolean, the scalar is converted to a boolean
    according to the following rule: if the scalar is a numerical
    value, non-zero means True and zero means False; if the scalar is
    a string, non-empty means True and empty means False. If the
    tensor is not a scalar, non-emptiness means True and False
    otherwise.
body: A function that takes a list of tensors and returns another
      list of tensors. Both lists have the same types as specified
      by T.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$input,

    FlatSymbolRefAttr:$cond,
    FlatSymbolRefAttr:$body,
    DefaultValuedAttr<I64Attr, "10">:$parallel_iterations,

    // Used to map StatelessWhile and While op defined in TensorFlow to a common
    // op.
    BoolAttr:$is_stateless,

    // In TensorFlow, While has a special behavior where if `output_shapes`
    // attribute is not empty, those shapes are used in its shape function
    // as result shapes instead of propagating operand shapes as result shapes.
    // This allows for different result shapes from operand shapes. While these
    // shapes are imported and set as a part of the result type, there is no
    // indicator differentiating between having no output shapes compared to
    // having all unranked shapes. Thus this attribute is set to determine
    // which shape function behavior to use for this op, specifically
    // propagating operand shapes as result shapes when this attribute is not
    // set, or preserving result shapes as is when this attribute is set.
    UnitAttr:$shape_invariant
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  // GraphDef attrs derived from the operand/result types.
  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultShapeListAttr output_shapes = TF_DerivedResultShapeListAttr<0>;

  let verifier = [{
    return Verify(*this);
  }];

  let extraClassDeclaration = [{
    // Get the condition function.
    FuncOp cond_function() {
      return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, cond());
    }

    // Get the body function.
    FuncOp body_function() {
      return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, body());
    }
  }];
}
737
def TF_WhileRegionOp : TF_Op<"WhileRegion",
      [DeclareOpInterfaceMethods<LoopLikeOpInterface>,
       SingleBlockImplicitTerminator<"YieldOp">]> {
  let summary = "while operation";
  let description = [{
  The tf.WhileRegion op represents a while loop using 2 regions and a set of
  iteration variables. The iteration variables maintained by this Op have the
  same types as the inputs. The Op executes a while loop described by the
  following pseudo code:

  ```
     func WhileRegionOp(inputs) {
       iteration_vars = inputs;
       while (cond(iteration_vars)) {
           iteration_vars = body(iteration_vars);
       }
       return iteration_vars;
     }
  ```

  `cond` is the condition region and `body` is the body region. Both these
  regions accept the current value of the iteration variables as inputs. The
  condition region returns a tensor<i1> which, if false, will exit the loop.
  The body region computes new values of the iteration variables. The iteration
  variables are initialized to the Op input, and the results of the
  tf.WhileRegion op are the final values of the iteration variables.

  This implies that the operand and result types for tf.WhileRegion should be
  the same. Note that the condition and body regions can implicitly capture
  loop invariant values directly. In canonical form, iteration variables that
  pass through the loop body unmodified are converted to implicitly captured
  references to their values outside the loop.
  }];

  let arguments = (ins
    Variadic<AnyTensor>:$input,

    DefaultValuedAttr<I64Attr, "10">:$parallel_iterations,

    // Used to map StatelessWhile and While op defined in TensorFlow to a common
    // op.
    BoolAttr:$is_stateless,

    // In TensorFlow, While has a special behavior where if `output_shapes`
    // attribute is not empty, those shapes are used in its shape function
    // as result shapes instead of propagating operand shapes as result shapes.
    // This allows for different result shapes from operand shapes. While these
    // shapes are imported and set as a part of the result type, there is no
    // indicator differentiating between having no output shapes compared to
    // having all unranked shapes. Thus this attribute is set to determine
    // which shape function behavior to use for this op, specifically
    // propagating operand shapes as result shapes when this attribute is not
    // set, or preserving result shapes as is when this attribute is set.
    UnitAttr:$shape_invariant
  );
  let results = (outs Variadic<AnyTensor>:$output);

  // Both regions hold a single block terminated by the implicit tf.Yield.
  let regions = (region SizedRegion<1>:$cond, SizedRegion<1>:$body);

  let verifier = [{ return Verify(*this); }];

  let hasCanonicalizer = 1;
}
801
// Result handle, derived attrs, and the variant-subtype verifier come from
// the TF_TensorListInitOp base class above.
def TF_TensorListReserveOp : TF_TensorListInitOp<"TensorListReserve"> {
  let summary = "List of the given size with empty elements.";

  let description = [{
element_shape: the shape of the future elements of the list
num_elements: the number of elements to reserve
handle: the output list
element_dtype: the desired type of elements in the list.
  }];

  let arguments = (ins
    TF_I32OrI64Tensor:$element_shape,
    TF_Int32Tensor:$num_elements
  );
}
817
// This operation when auto-generated is marked as NoSideEffect because it isn't
// stateful in TensorFlow. However it is kept alive through control dependency,
// and does not have any output. When placed in an island it wouldn't be kept
// alive in any way and the canonicalizer would just always fold it away.
def TF_TPUReplicateMetadataOp : TF_Op<"TPUReplicateMetadata", []> {
  let summary = [{
Metadata indicating how the TPU computation should be replicated.
  }];

  let description = [{
This operation holds the metadata common to operations of a `tpu.replicate()` computation subgraph.
  }];

  let arguments = (ins
    Confined<I64Attr, [IntMinValue<0>]>:$num_replicas,
    DefaultValuedAttr<I64Attr, "1">:$num_cores_per_replica,
    StrAttr:$topology,
    DefaultValuedAttr<BoolAttr, "true">:$use_tpu,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$device_assignment,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$computation_shape,
    DefaultValuedAttr<StrArrayAttr, "{}">:$host_compute_core,
    DefaultValuedAttr<StrArrayAttr, "{}">:$padding_map,
    DefaultValuedAttr<StrAttr, "STEP_MARK_AT_ENTRY">:$step_marker_location,
    DefaultValuedAttr<BoolAttr, "false">:$allow_soft_placement,
    DefaultValuedAttr<BoolAttr, "false">:$use_spmd_for_xla_partitioning
  );

  // No results: the op is kept alive only through control dependencies (see
  // the note above the def).
  let results = (outs);
}
847
def TF_VarHandleOp : TF_Op<"VarHandleOp", [TF_ResourceHandleAllocatorInterface]> {
  let summary = "Creates a handle to a Variable resource from its name.";

  let description = [{
container: the container this variable is placed in.
shared_name: the name by which this variable is referred to.
dtype and shape: attributes representing the data type and shape held in the
  variable.

Example:
    resource_variable_ops.var_handle_op(
          dtype=dtypes.int32, shape=[8, 16], container="foo", shared_name="bar")
  returns a handle for a variable with name "bar" in container "foo", and the
  variable holds a tensor of shape [8, 16] and dtype int32.
  }];

  let arguments = (ins
    DefaultValuedAttr<StrAttr, "">:$container,
    DefaultValuedAttr<StrAttr, "">:$shared_name
  );

  let results = (outs
    // Resource result annotated with the TF_VariableAlloc side effect.
    Res<TF_ResourceTensor, "", [TF_VariableAlloc]>:$resource
  );

  let verifier = [{
    // VarHandleOp requires the resource handle supply a single subtype from
    // which to derive the dtype and shape attributes.
    if (resource_type().getSubtypes().size() != 1) {
      return emitOpError(
          "must have exactly one subtype in the result resource type");
    }

    return success();
  }];

  // `dtype` and `shape` are derived from the single subtype of the result
  // resource type (see resource_subtype() below).
  DerivedTypeAttr dtype = DerivedTypeAttr<
      "return getElementTypeOrSelf(resource_subtype());">;
  DerivedAttr shape = DerivedAttr<
      "ShapedType",
      "return resource_subtype().cast<ShapedType>();",
      [{ mlir::TF::ShapeAttr::get($_ctx, $_self) }]>;

  let extraClassDeclaration = [{
    // TF_ResourceHandleAllocatorInterface:
    ResourceHandleValueAndId GetResourceHandleValueAndId(
      llvm::SmallDenseMap<ResourceHandle, int64_t> &resource_handle_id_map,
      int64_t &next_id);

    // The single subtype carried by the result resource type.
    TensorType resource_subtype() { return resource_type().getSubtypes()[0]; }

    // Element type of the `resource` result, cast to TF::ResourceType.
    ResourceType resource_type() {
      return getElementTypeOrSelf(resource()).cast<TF::ResourceType>();
    }
  }];
}
904
def TF_EnqueueTPUEmbeddingBatchOp : TF_Op<"EnqueueTPUEmbeddingBatch", [TF_TPUEmbeddingSideEffect]> {
  let summary = [{
An op that enqueues a list of input batch tensors to TPUEmbedding.
  }];

  let arguments = (ins
    Variadic<TF_StrTensor>:$batch,
    TF_StrTensor:$mode_override,

    DefaultValuedAttr<I64Attr, "-1">:$device_ordinal,
    DefaultValuedAttr<StrArrayAttr, "{}">:$combiners
  );

  let results = (outs);

  // N (number of enqueued batch tensors) is derived from the size of the
  // variadic $batch operand during export.
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}
922
923// Multiple variadic operands with different sizes are not supported by the
924// dialect generator, so we manually added the op.
def TF_SendTPUEmbeddingGradientsOp : TF_Op<"SendTPUEmbeddingGradients", [AttrSizedOperandSegments, TF_TPUEmbeddingSideEffect]> {
  let summary = "Performs gradient updates of embedding tables.";

  let description = [{
inputs: A TensorList of gradients with which to update embedding tables.
    This argument has the same length and shapes as the return value of
    RecvTPUEmbeddingActivations, but contains gradients of the model's loss
    with respect to the embedding activations. The embedding tables are updated
    from these gradients via the optimizer specified in the TPU embedding
    configuration given to tpu.initialize_system.
learning_rates: A TensorList of float32 scalars, one for each dynamic learning
    rate tag: see the comments in
    //third_party/tensorflow/core/protobuf/tpu/optimization_parameters.proto.
    Multiple tables can share the same dynamic learning rate tag as specified
    in the configuration. If the learning rates for all tables are constant,
    this list should be empty.
config: Serialized TPUEmbeddingConfiguration proto.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$inputs,
    Variadic<TF_Tensor>:$learning_rates,
    StrAttr:$config
  );

  // AttrSizedOperandSegments records where $inputs ends and $learning_rates
  // begins; the two group sizes are exported as the attributes below.
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandSizeAttr NN = TF_DerivedOperandSizeAttr<1>;
}
953
954// Multiple variadic operands with different sizes are not supported by the
955// dialect generator, so we manually added the op.
def TF__SendTPUEmbeddingGradientsOp : TF_Op<"_SendTPUEmbeddingGradients", [AttrSizedOperandSegments, TF_TPUEmbeddingSideEffect]> {
  let summary = "Performs gradient updates of embedding tables.";

  let description = [{
The gradients argument is a TensorList having the same length and shapes as the
return value of _RecvTPUEmbeddingActivations, but contains gradients of the
model's loss with respect to the embedding activations. The embedding tables are
updated from these gradients via the optimizer specified in the
TPUEmbeddingConfiguration proto given to tpu.initialize_system.

gradients: A TensorList of gradients with which to update embedding tables.
learning_rates: A TensorList of learning rates used for updating the embedding
    tables via the optimizer. The length of the TensorList must be equal to the
    number of dynamic learning rate tags specified in the
    TPUEmbeddingConfiguration proto.
deduplication_data: A Tensor with type=DT_VARIANT containing the deduplication
    data. The tensor is an XLA nested tuple containing N elements. Each
    element of the nested tuple is a tuple of rank 1 tensors. Each tensor either
    contains indices (DT_INT32) for embedding lookup or weights (DT_FLOAT) to
    apply to the output of the embedding lookup operation.
config: Serialized TPUEmbeddingConfiguration proto.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$gradients,
    Variadic<TF_Tensor>:$learning_rates,
    TF_VariantTensor:$deduplication_data,
    StrAttr:$config
  );

  // Sizes of the two variadic operand groups (tracked by
  // AttrSizedOperandSegments) are exported as these attributes.
  TF_DerivedOperandSizeAttr NumTables = TF_DerivedOperandSizeAttr<0>;
  TF_DerivedOperandSizeAttr NumLearningRateTags = TF_DerivedOperandSizeAttr<1>;
}
989
990// Updated the op description text from the auto-generated op definition.
def TF__RecvTPUEmbeddingDeduplicationDataOp : TF_Op<"_RecvTPUEmbeddingDeduplicationData", []> {
  let summary = [{
Receives deduplication data (indices and weights).
  }];

  let description = [{
The deduplication data is a Tensor with type=DT_VARIANT. The tensor itself is an
XLA nested tuple containing N elements. Each element of the nested tuple is a
tuple of rank 1 tensors. Each tensor either contains indices (DT_INT32) for
embedding lookup or weights (DT_FLOAT) to apply to the output of the embedding
lookup operation.
  }];

  let arguments = (ins
    // Serialized TPUEmbeddingConfiguration proto.
    StrAttr:$config
  );

  let results = (outs
    // The variant tensor holding the XLA nested tuple described above.
    TF_VariantTensor:$output
  );
}
1012
def TF_XlaShardingOp : TF_Op<"XlaSharding", [NoSideEffect]> {
  let summary = [{
An op which shards the input based on the given sharding attribute.
  }];

  let arguments = (ins
    TF_Tensor:$input,

    DefaultValuedAttr<StrAttr, "">:$sharding,
    // NOTE(review): presumably the serialized xla::OpSharding proto consumed
    // by the XLA bridge — confirm against the bridge's sharding passes.
    OptionalAttr<StrAttr>:$_XlaSharding
  );

  let results = (outs
    TF_Tensor:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
1031
def TF_InfeedDequeueTupleOp : TF_Op<"InfeedDequeueTuple", []> {
  let summary = "Fetches multiple values from infeed as an XLA tuple.";

  let arguments = (ins
    OptionalAttr<StrAttr>:$_XlaSharding
  );

  let results = (outs
    Variadic<TF_Tensor>:$outputs
  );

  // shapes and dtypes are derived from the variadic $outputs result list
  // during export.
  TF_DerivedResultShapeListAttr shapes = TF_DerivedResultShapeListAttr<0>;
  TF_DerivedResultTypeListAttr dtypes = TF_DerivedResultTypeListAttr<0>;
}
1046
def TF_StringFormatOp : TF_Op<"StringFormat", [NoSideEffect]> {
  let summary = "Formats a string template using a list of tensors.";

  let description = [{
Formats a string template using a list of tensors, pretty-printing tensor summaries.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$inputs,

    // Each occurrence of $placeholder in $strtemplate is replaced with one
    // input's summary; $summarize bounds how much of each tensor is printed.
    DefaultValuedAttr<StrAttr, "%s">:$strtemplate,
    DefaultValuedAttr<StrAttr, "%s">:$placeholder,
    DefaultValuedAttr<I64Attr, "3">:$summarize
  );

  let results = (outs
    TF_StrTensor:$output
  );

  // T is derived from the element types of the variadic $inputs.
  TF_DerivedOperandTypeListAttr T = TF_DerivedOperandTypeListAttr<0>;
}
1068
1069//===----------------------------------------------------------------------===//
1070// tf.data ops
1071//===----------------------------------------------------------------------===//
1072
def TF_BatchDatasetV2Op : TF_Op<"BatchDatasetV2", [NoSideEffect]> {
  let summary = [{
Creates a dataset that batches `batch_size` elements from `input_dataset`.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_dataset,
    TF_Int64Tensor:$batch_size,
    TF_BoolTensor:$drop_remainder,

    DefaultValuedAttr<BoolAttr, "false">:$parallel_copy,
    // output_types and output_shapes must each contain at least one entry.
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    TF_VariantTensor:$handle
  );
}
1092
def TF_MapDatasetOp : TF_Op<"MapDataset", [NoSideEffect]> {
  let summary = [{
    Creates a dataset that applies `f` to the outputs of `input_dataset`.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_dataset,
    Variadic<TF_Tensor>:$other_arguments,

    // Symbol reference to the map function applied to each dataset element.
    SymbolRefAttr:$f,
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
    DefaultValuedAttr<BoolAttr, "true">:$use_inter_op_parallelism,
    DefaultValuedAttr<BoolAttr, "false">:$preserve_cardinality
  );

  let results = (outs
    TF_VariantTensor:$handle
  );

  // Targuments is derived from the types of the variadic $other_arguments
  // (operand group index 1).
  TF_DerivedOperandTypeListAttr Targuments = TF_DerivedOperandTypeListAttr<1>;
}
1115
def TF_MapAndBatchDatasetOp : TF_Op<"MapAndBatchDataset", [NoSideEffect]> {
  let summary = "Creates a dataset that fuses mapping with batching.";

  let description = [{
Creates a dataset that applies `f` to the outputs of `input_dataset` and then
batches `batch_size` of them.

Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up
to `batch_size * num_parallel_batches` copies of `f` in parallel.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_dataset,
    Variadic<TF_Tensor>:$other_arguments,
    TF_Int64Tensor:$batch_size,
    TF_Int64Tensor:$num_parallel_calls,
    TF_BoolTensor:$drop_remainder,

    // Symbol reference to the map function.
    SymbolRefAttr:$f,
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
    DefaultValuedAttr<BoolAttr, "false">:$preserve_cardinality
  );

  let results = (outs
    TF_VariantTensor:$handle
  );

  // Derived from the types of the variadic $other_arguments.
  TF_DerivedOperandTypeListAttr Targuments = TF_DerivedOperandTypeListAttr<1>;
}
1146
def TF_ParallelMapDatasetOp : TF_Op<"ParallelMapDataset", [NoSideEffect]> {
  let summary = [{
    Creates a dataset that applies `f` to the outputs of `input_dataset`.
  }];

  let description = [{
    Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes
    up to `num_parallel_calls` copies of `f` in parallel.
  }];

  let arguments = (ins
    TF_VariantTensor:$input_dataset,
    Variadic<TF_Tensor>:$other_arguments,
    // Note: i32 here, unlike MapAndBatchDataset where num_parallel_calls is
    // an i64 tensor.
    TF_Int32Tensor:$num_parallel_calls,

    SymbolRefAttr:$f,
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes,
    DefaultValuedAttr<BoolAttr, "true">:$use_inter_op_parallelism,
    DefaultValuedAttr<BoolAttr, "false">:$sloppy,
    DefaultValuedAttr<BoolAttr, "false">:$preserve_cardinality
  );

  let results = (outs
    TF_VariantTensor:$handle
  );

  // Derived from the types of the variadic $other_arguments.
  TF_DerivedOperandTypeListAttr Targuments = TF_DerivedOperandTypeListAttr<1>;
}
1176
def TF_TensorSliceDatasetOp : TF_Op<"TensorSliceDataset", []> {
  let summary = [{
    Creates a dataset that emits each dim-0 slice of `components` once.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$components,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    TF_VariantTensor:$handle
  );

  // Toutput_types is derived from the element types of $components.
  TF_DerivedOperandTypeListAttr Toutput_types = TF_DerivedOperandTypeListAttr<0>;
}
1193
def TF_ToBoolOp : TF_Op<"ToBool", [NoSideEffect]> {
  let summary = "Converts a tensor to a scalar predicate.";

  let description = [{
Converts a tensor to a scalar predicate with the following rules:

- For 0D tensors, truthiness is determined by comparing against a "zero"
  value. For numerical types it is the obvious zero. For strings it is the
  empty string.

- For >0D tensors, truthiness is determined by looking at the number of
  elements. If has zero elements, then the result is false. Otherwise the
  result is true.

This matches the behavior of If and While for determining if a tensor counts
as true/false for a branch condition.
  }];

  let arguments = (ins
    TF_Tensor:$input
  );

  let results = (outs
    // Always a rank-0 (scalar) i1 tensor.
    0DTensorOf<[I1]>:$output
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  // Convenience builder: the result type is fixed to a 0-D i1 tensor, so
  // callers only need to supply the input value.
  let builders = [
    OpBuilderDAG<(ins "Value":$value),
    [{
      build($_builder, $_state, RankedTensorType::get({}, $_builder.getI1Type()),
            value);
    }]>];

  let hasCanonicalizer = 1;
}
1231
def TF_BesselI0eOp : TF_Op<"BesselI0e", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes the Bessel i0e function of `x` element-wise.";

  let description = [{
Exponentially scaled modified Bessel function of order 0 defined as
`bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.

This function is faster and numerically stabler than `bessel_i0(x)`.
  }];

  // Element-wise unary op: operand and result share one type
  // (SameOperandsAndResultType).
  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
1252
// Doc fix: i1e is the exponentially scaled modified Bessel function of
// order 1 (the previous text said "order 0", copied from BesselI0e; note the
// formula itself already referenced `bessel_i1`).
def TF_BesselI1eOp : TF_Op<"BesselI1e", [NoSideEffect, SameOperandsAndResultType]> {
  let summary = "Computes the Bessel i1e function of `x` element-wise.";

  let description = [{
Exponentially scaled modified Bessel function of order 1 defined as
`bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.

This function is faster and numerically stabler than `bessel_i1(x)`.
  }];

  // Element-wise unary op: operand and result share one type
  // (SameOperandsAndResultType).
  let arguments = (ins
    TF_FloatTensor:$x
  );

  let results = (outs
    TF_FloatTensor:$y
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
1273
def TF_TPUPartitionedCallOp : TF_Op<"TPUPartitionedCall", [CallOpInterface]> {
  let summary = "Calls a function placed on a specified TPU device.";

  let arguments = (ins
    Variadic<TF_Tensor>:$args,
    TF_Int32Tensor:$device_ordinal,

    // Symbol reference to the function to call.
    SymbolRefAttr:$f,
    DefaultValuedAttr<I64Attr, "0">:$autotuner_thresh
  );

  let results = (outs
    Variadic<TF_Tensor>:$output
  );

  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;

  let extraClassDeclaration = [{
    // Gets the argument operands to the called function.
    operand_range getArgOperands() { return args(); }

    // Returns the callee of this operation.
    CallInterfaceCallable getCallableForCallee() { return fAttr(); }

    // Resolves the `f` symbol to its FuncOp via nearest-symbol-table lookup;
    // returns null if the symbol cannot be found.
    FuncOp func() {
      return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, f());
    }
  }];

  let verifier = [{ return VerifyPartitionedCall(*this); }];
}
1307
def TF_BatchFunctionOp : TF_Op<"BatchFunction", [AttrSizedOperandSegments]> {
  let summary = [{
Batches all the inputs tensors to the computation done by the function.
  }];

  let description = [{
So, for example, in the following code

  ```python

  # This input will be captured.
  y = tf.placeholder_with_default(1.0, shape=[])

  @tf.Defun(tf.float32)
  def computation(a):
    return tf.matmul(a, a) + y

  b = gen_batch_ops.batch_function(
          f=computation
          in_tensors=[a],
          captured_tensors=computation.captured_inputs,
          Tout=[o.type for o in computation.definition.signature.output_arg],
          num_batch_threads=1,
          max_batch_size=10,
          batch_timeout_micros=100000,  # 100ms
          allowed_batch_sizes=[3, 10],
          batching_queue="")
  ```

If more than one session.run call is simultaneously trying to compute `b`
the values of `a` will be gathered, non-deterministically concatenated
along the first axis, and only one thread will run the computation.

Assumes that all arguments of the function are Tensors which will be batched
along their first dimension.

Arguments that are captured, are not batched. The session.run call which does
the concatenation, will use the values of the captured tensors available to it.
Therefore, typical uses of captured tensors should involve values which remain
unchanged across session.run calls. Inference is a good example of this.

SparseTensor is not supported. The return value of the decorated function
must be a Tensor or a list/tuple of Tensors.
  }];

  let arguments = (ins
    Variadic<TF_Tensor>:$in_tensors,
    Variadic<TF_Tensor>:$captured_tensors,

    // Symbol reference to the batched function.
    SymbolRefAttr:$f,
    I64Attr:$num_batch_threads,
    I64Attr:$max_batch_size,
    I64Attr:$batch_timeout_micros,
    DefaultValuedAttr<I64Attr, "10">:$max_enqueued_batches,
    DefaultValuedAttr<I64ArrayAttr, "{}">:$allowed_batch_sizes,
    StrAttr:$container,
    StrAttr:$shared_name,
    StrAttr:$batching_queue,
    DefaultValuedAttr<BoolAttr, "false">:$enable_large_batch_splitting,
    // Required by AttrSizedOperandSegments: records how many operands belong
    // to $in_tensors vs $captured_tensors.
    I32ElementsAttr:$operand_segment_sizes
  );

  let results = (outs
    Variadic<TF_Tensor>:$out_tensors
  );

  TF_DerivedOperandTypeListAttr Tin = TF_DerivedOperandTypeListAttr<0>;
  TF_DerivedOperandTypeListAttr Tcaptured = TF_DerivedOperandTypeListAttr<1>;
  TF_DerivedResultTypeListAttr Tout = TF_DerivedResultTypeListAttr<0>;
}
1378
def TF_AddV2Op : TF_Op<"AddV2", [Commutative, NoSideEffect, ResultsBroadcastableShape, TF_CwiseBinary, TF_LayoutAgnostic, TF_SameOperandsAndResultElementTypeResolveRef]>,
                 WithBroadcastableBinOpBuilder {
  let summary = "Returns x + y element-wise.";

  let description = [{
*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  }];

  // Unlike Add below, AddV2 excludes string element types and is Commutative.
  let arguments = (ins
    TensorOf<[TF_Float, TF_SInt, TF_Complex, TF_Uint8, TF_Uint16, TF_Uint32, TF_Uint64]>:$x,
    TensorOf<[TF_Float, TF_SInt, TF_Complex, TF_Uint8, TF_Uint16, TF_Uint32, TF_Uint64]>:$y
  );

  let results = (outs
    TensorOf<[TF_Float, TF_SInt, TF_Complex, TF_Uint8, TF_Uint16, TF_Uint32, TF_Uint64]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  // Canonicalization patterns and folding hooks are implemented in C++.
  let hasCanonicalizer = 1;

  let hasFolder = 1;
}
1403
def TF_DivNoNanOp : TF_Op<"DivNoNan", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                    WithBroadcastableBinOpBuilder {
  let summary = "Returns 0 if the denominator is zero.";

  let description = [{
*NOTE*: `DivNoNan` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  }];

  // Restricted to floating-point and complex element types.
  let arguments = (ins
    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Complex]>:$x,
    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Complex]>:$y
  );

  let results = (outs
    TensorOf<[TF_Float16, TF_Float32, TF_Float64, TF_Complex]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
1424
def TF_MaximumOp : TF_Op<"Maximum", [NoSideEffect, ResultsBroadcastableShape, TF_SameOperandsAndResultElementTypeResolveRef]>,
                   WithBroadcastableBinOpBuilder {
  let summary = "Returns the max of x and y (i.e. x > y ? x : y) element-wise.";

  let description = [{
*NOTE*: `Maximum` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  }];

  let arguments = (ins
    TensorOf<[TF_Float, TF_Int16, TF_Int32, TF_Int64, TF_Uint8]>:$x,
    TensorOf<[TF_Float, TF_Int16, TF_Int32, TF_Int64, TF_Uint8]>:$y
  );

  let results = (outs
    TensorOf<[TF_Float, TF_Int16, TF_Int32, TF_Int64, TF_Uint8]>:$z
  );

  // T is derived from the element type of operand 0.
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
1445
def TF_RealDivOp : TF_Op<"RealDiv", [NoSideEffect, ResultsBroadcastableShape, TF_CwiseBinary]>,
                   WithBroadcastableBinOpBuilder {
  let summary = "Returns x / y element-wise for real types.";

  let description = [{
If `x` and `y` are reals, this will return the floating-point division.

*NOTE*: `Div` supports broadcasting. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  }];

  let arguments = (ins
    TensorOf<[TF_Float, TF_SInt, TF_Complex, TF_Uint16, TF_Uint8]>:$x,
    TensorOf<[TF_Float, TF_SInt, TF_Complex, TF_Uint16, TF_Uint8]>:$y
  );

  let results = (outs
    TensorOf<[TF_Float, TF_SInt, TF_Complex, TF_Uint16, TF_Uint8]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  // Canonicalization patterns and folding hooks are implemented in C++.
  let hasCanonicalizer = 1;

  let hasFolder = 1;
}
1472
def TF_AddOp : TF_Op<"Add", [NoSideEffect, ResultsBroadcastableShape, TF_LayoutAgnostic, TF_SameOperandsAndResultElementTypeResolveRef]>,
               WithBroadcastableBinOpBuilder {
  let summary = "Returns x + y element-wise.";

  let description = [{
*NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

Given two input tensors, the `tf.add` operation computes the sum for every element in the tensor.

Both input and output have a range `(-inf, inf)`.
  }];

  // Unlike AddV2 above, the element type may also be string
  // (TF_NumberNotQuantizedOrStr), and the op is not marked Commutative.
  let arguments = (ins
    TensorOf<[TF_NumberNotQuantizedOrStr]>:$x,
    TensorOf<[TF_NumberNotQuantizedOrStr]>:$y
  );

  let results = (outs
    TensorOf<[TF_NumberNotQuantizedOrStr]>:$z
  );

  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;

  let hasCanonicalizer = 1;
}
1499
def TF_StatefulStandardNormalV2Op : TF_Op<"StatefulStandardNormalV2", []> {
  let summary = "Outputs random values from a normal distribution.";

  let description = [{
The generated values will have mean 0 and standard deviation 1.
  }];

  let arguments = (ins
    // The RNG state resource is both read and updated on each invocation.
    Arg<TF_ResourceTensor, "", [TF_VariableRead,TF_VariableWrite]>:$resource,
    TF_Int64Tensor:$algorithm,
    TF_I32OrI64Tensor:$shape
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  // shape_dtype comes from the $shape operand; dtype from the result type.
  TF_DerivedOperandTypeAttr shape_dtype = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}
1520
def TF_StatefulTruncatedNormalOp : TF_Op<"StatefulTruncatedNormal", []> {
  let summary = "Outputs random values from a truncated normal distribution.";

  let description = [{
The generated values follow a normal distribution with mean 0 and standard
deviation 1, except that values whose magnitude is more than 2 standard
deviations from the mean are dropped and re-picked.
  }];

  let arguments = (ins
    // The RNG state resource is both read and updated on each invocation.
    Arg<TF_ResourceTensor, "", [TF_VariableRead,TF_VariableWrite]>:$resource,
    TF_Int64Tensor:$algorithm,
    TF_I32OrI64Tensor:$shape
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  // shape_dtype comes from the $shape operand; dtype from the result type.
  TF_DerivedOperandTypeAttr shape_dtype = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}
1543
def TF_StatefulUniformOp : TF_Op<"StatefulUniform", []> {
  let summary = "Outputs random values from a uniform distribution.";

  let description = [{
The generated values follow a uniform distribution in the range `[0, 1)`. The
lower bound 0 is included in the range, while the upper bound 1 is excluded.
  }];

  let arguments = (ins
    // The RNG state resource is both read and updated on each invocation.
    Arg<TF_ResourceTensor, "", [TF_VariableRead,TF_VariableWrite]>:$resource,
    TF_Int64Tensor:$algorithm,
    TF_I32OrI64Tensor:$shape
  );

  let results = (outs
    TF_FloatTensor:$output
  );

  // shape_dtype comes from the $shape operand; dtype from the result type.
  TF_DerivedOperandTypeAttr shape_dtype = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}
1565
def TF_StatefulUniformFullIntOp : TF_Op<"StatefulUniformFullInt", []> {
  let summary = "Outputs random integers from a uniform distribution.";

  let description = [{
The generated values are uniform integers covering the whole range of `dtype`.
  }];

  let arguments = (ins
    // The RNG state resource is both read and updated on each invocation.
    Arg<TF_ResourceTensor, "", [TF_VariableRead,TF_VariableWrite]>:$resource,
    TF_Int64Tensor:$algorithm,
    TF_I32OrI64Tensor:$shape
  );

  let results = (outs
    TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>:$output
  );

  // shape_dtype comes from the $shape operand; dtype from the result type.
  TF_DerivedOperandTypeAttr shape_dtype = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedResultTypeAttr dtype = TF_DerivedResultTypeAttr<0>;
}
1586
1587// TODO(lyandy): Investigate supported dtypes (`minval`, `maxval`, `output`) for
1588// `tf.StatefulUniformInt`. tf2xla kernels support i32, i64, ui32, and ui64
1589// while TensorFlow CPU/GPU kernels only support i32 and i64.
def TF_StatefulUniformIntOp : TF_Op<"StatefulUniformInt", []> {
  let summary = "Outputs random integers from a uniform distribution.";

  let description = [{
The generated values are uniform integers in the range `[minval, maxval)`.
The lower bound `minval` is included in the range, while the upper bound
`maxval` is excluded.

The random integers are slightly biased unless `maxval - minval` is an exact
power of two.  The bias is small for values of `maxval - minval` significantly
smaller than the range of the output (either `2^32` or `2^64`).
  }];

  let arguments = (ins
    // The RNG state resource is both read and updated on each invocation.
    Arg<TF_ResourceTensor, "", [TF_VariableRead,TF_VariableWrite]>:$resource,
    TF_Int64Tensor:$algorithm,
    TF_I32OrI64Tensor:$shape,
    TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>:$minval,
    TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>:$maxval
  );

  let results = (outs
    TensorOf<[TF_Int32, TF_Int64, TF_Uint32, TF_Uint64]>:$output
  );

  // Unlike the other stateful random ops above, dtype is derived from the
  // $minval operand (index 3) rather than from the result type.
  TF_DerivedOperandTypeAttr shape_dtype = TF_DerivedOperandTypeAttr<2>;
  TF_DerivedOperandTypeAttr dtype = TF_DerivedOperandTypeAttr<3>;
}
1618
def TF_CloseSummaryWriterOp : TF_Op<"CloseSummaryWriter", []> {
  let summary = "Flushes and closes the summary writer.";

  let description = [{
Also removes it from the resource manager. To reopen, use another
CreateSummaryFileWriter op.

writer: A handle to the summary writer resource.
  }];

  let arguments = (ins
    // TF_SummaryFree: this op releases the summary-writer resource, which
    // also keeps the result-less op from being treated as dead code.
    Arg<TF_ResourceTensor, "", [TF_SummaryFree]>:$writer
  );

  let results = (outs);
}
1635
1636// TODO(b/168035831): Model db_uri read/write.
def TF_CreateSummaryDbWriterOp : TF_Op<"CreateSummaryDbWriter", []> {
  let summary = "Creates summary database writer accessible by given resource handle.";

  let description = [{
This can be used to write tensors from the execution graph directly
to a database. Only SQLite is supported right now. This function
will create the schema if it doesn't exist. Entries in the Users,
Experiments, and Runs tables will be created automatically if they
don't already exist.

writer: Handle to SummaryWriter resource to overwrite.
db_uri: For example "file:/tmp/foo.sqlite".
experiment_name: Can't contain ASCII control characters or <>. Case
  sensitive. If empty, then the Run will not be associated with any
  Experiment.
run_name: Can't contain ASCII control characters or <>. Case sensitive.
  If empty, then each Tag will not be associated with any Run.
user_name: Must be valid as both a DNS label and Linux username. If
  empty, then the Experiment will not be associated with any User.
  }];

  let arguments = (ins
    // TF_SummaryWrite effect models the mutation of the writer resource
    // (db_uri I/O is not modeled; see the TODO above this def).
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer,
    TF_StrTensor:$db_uri,
    TF_StrTensor:$experiment_name,
    TF_StrTensor:$run_name,
    TF_StrTensor:$user_name
  );

  let results = (outs);
}
1668
1669// TODO(b/168035831): Model logdir read/write.
def TF_CreateSummaryFileWriterOp : TF_Op<"CreateSummaryFileWriter", []> {
  let summary = "Creates a summary file writer accessible by the given resource handle.";

  let description = [{
writer: A handle to the summary writer resource
logdir: Directory where the event file will be written.
max_queue: Size of the queue of pending events and summaries.
flush_millis: How often, in milliseconds, to flush the pending events and
  summaries to disk.
filename_suffix: Every event file's name is suffixed with this suffix.
  }];

  let arguments = (ins
    // TF_SummaryWrite effect models the mutation of the writer resource
    // (logdir I/O is not modeled; see the TODO above this def).
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer,
    TF_StrTensor:$logdir,
    TF_Int32Tensor:$max_queue,
    TF_Int32Tensor:$flush_millis,
    TF_StrTensor:$filename_suffix
  );

  let results = (outs);
}
1692
def TF_FlushSummaryWriterOp : TF_Op<"FlushSummaryWriter", []> {
  let summary = "Flushes the writer's unwritten events.";

  let description = [{
writer: A handle to the summary writer resource.
  }];

  let arguments = (ins
    // The write effect keeps this result-less op alive through DCE.
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer
  );

  let results = (outs);
}
1706
def TF_ImportEventOp : TF_Op<"ImportEvent", []> {
  let summary = "Outputs a `tf.Event` protocol buffer.";

  let description = [{
When CreateSummaryDbWriter is being used, this op can be useful for
importing data from event logs.

writer: A handle to a summary writer.
event: A string containing a binary-encoded tf.Event proto.
  }];

  let arguments = (ins
    // The write effect keeps this result-less op alive through DCE.
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer,
    TF_StrTensor:$event
  );

  let results = (outs);
}
1725
def TF_SummaryWriterOp : TF_Op<"SummaryWriter", [TF_ResourceHandleAllocatorInterface]> {
  let summary = "Returns a handle to be used to access a summary writer.";

  let description = [{
The summary writer is an in-graph resource which can be used by ops to write
summaries to event files.

writer: the summary writer resource. Scalar handle.
  }];

  let arguments = (ins
    StrAttr:$shared_name,
    StrAttr:$container
  );

  let results = (outs
    // TF_SummaryAlloc: this op allocates the summary-writer resource.
    Res<TF_ResourceTensor, "", [TF_SummaryAlloc]>:$writer
  );

  let extraClassDeclaration = [{
    // TF_ResourceHandleAllocatorInterface:
    ResourceHandleValueAndId GetResourceHandleValueAndId(
      llvm::SmallDenseMap<ResourceHandle, int64_t> &resource_handle_id_map,
      int64_t &next_id);
  }];
}
1752
def TF_WriteAudioSummaryOp : TF_Op<"WriteAudioSummary", []> {
  let summary = "Writes a `Summary` protocol buffer with audio.";

  let description = [{
The summary has up to `max_outputs` summary values containing audio. The
audio is built from `tensor` which must be 3-D with shape `[batch_size,
frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are
assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`.

The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
build the `tag` of the summary values:

*  If `max_outputs` is 1, the summary value tag is '*tag*/audio'.
*  If `max_outputs` is greater than 1, the summary value tags are
   generated sequentially as '*tag*/audio/0', '*tag*/audio/1', etc.

writer: A handle to a summary writer.
step: The step to write the summary for.
tag: Scalar. Used to build the `tag` attribute of the summary values.
tensor: 2-D of shape `[batch_size, frames]`.
sample_rate: The sample rate of the signal in hertz.
max_outputs: Max number of batch elements to generate audio for.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer,
    TF_Int64Tensor:$step,
    TF_StrTensor:$tag,
    TF_Float32Tensor:$tensor,
    TF_Float32Tensor:$sample_rate,

    // Defaults to 3 and must be at least 1 (Confined/IntMinValue).
    Confined<DefaultValuedAttr<I64Attr, "3">, [IntMinValue<1>]>:$max_outputs
  );

  let results = (outs);
}
1789
// Writes a serialized GraphDef as a summary through a writer resource.
def TF_WriteGraphSummaryOp : TF_Op<"WriteGraphSummary", []> {
  let summary = "Writes a `GraphDef` protocol buffer to a `SummaryWriter`.";

  let description = [{
writer: Handle of `SummaryWriter`.
step: The step to write the summary for.
tensor: A scalar string of the serialized tf.GraphDef proto.
  }];

  let arguments = (ins
    // Writer resource handle (write effect), step counter, and the
    // serialized GraphDef string.
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer,
    TF_Int64Tensor:$step,
    TF_StrTensor:$tensor
  );

  let results = (outs);
}
1807
// Writes a histogram summary through a writer resource.
def TF_WriteHistogramSummaryOp : TF_Op<"WriteHistogramSummary", []> {
  let summary = "Writes a histogram summary.";

  let description = [{
The generated
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
has one summary value containing a histogram for `values`.

This op reports an `InvalidArgument` error if any value is not finite.

writer: A handle to a summary writer.
step: The step to write the summary for.
tag: Scalar.  Tag to use for the `Summary.Value`.
values: Any shape. Values to use to build the histogram.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer,
    TF_Int64Tensor:$step,
    TF_StrTensor:$tag,
    // Histogram payload; restricted to integer or floating-point tensors.
    TF_IntOrFpTensor:$values
  );

  let results = (outs);

  // Element type "T" is derived from the $values operand (operand index 3).
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
}
1835
// Writes an image summary through a writer resource. The description text
// below is emitted into generated op documentation; fixed the `unit8` typo
// (the $bad_color operand is declared TF_Uint8Tensor, i.e. `uint8`).
def TF_WriteImageSummaryOp : TF_Op<"WriteImageSummary", []> {
  let summary = "Writes a `Summary` protocol buffer with images.";

  let description = [{
The summary has up to `max_images` summary values containing images. The
images are built from `tensor` which must be 4-D with shape `[batch_size,
height, width, channels]` and where `channels` can be:

*  1: `tensor` is interpreted as Grayscale.
*  3: `tensor` is interpreted as RGB.
*  4: `tensor` is interpreted as RGBA.

The images have the same number of channels as the input tensor. For float
input, the values are normalized one image at a time to fit in the range
`[0, 255]`.  `uint8` values are unchanged.  The op uses two different
normalization algorithms:

*  If the input values are all positive, they are rescaled so the largest one
   is 255.

*  If any input value is negative, the values are shifted so input value 0.0
   is at 127.  They are then rescaled so that either the smallest value is 0,
   or the largest one is 255.

The `tag` argument is a scalar `Tensor` of type `string`.  It is used to
build the `tag` of the summary values:

*  If `max_images` is 1, the summary value tag is '*tag*/image'.
*  If `max_images` is greater than 1, the summary value tags are
   generated sequentially as '*tag*/image/0', '*tag*/image/1', etc.

The `bad_color` argument is the color to use in the generated images for
non-finite input values.  It is a `uint8` 1-D tensor of length `channels`.
Each element must be in the range `[0, 255]` (It represents the value of a
pixel in the output image).  Non-finite values in the input tensor are
replaced by this tensor in the output image.  The default value is the color
red.

writer: A handle to a summary writer.
step: The step to write the summary for.
tag: Scalar. Used to build the `tag` attribute of the summary values.
tensor: 4-D of shape `[batch_size, height, width, channels]` where
  `channels` is 1, 3, or 4.
max_images: Max number of batch elements to generate images for.
bad_color: Color to use for pixels with non-finite values.
  }];

  let arguments = (ins
    // Operands.
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer,
    TF_Int64Tensor:$step,
    TF_StrTensor:$tag,
    // Image data: half/float are normalized per image; uint8 passes through
    // (per the description above).
    TensorOf<[TF_Float16, TF_Float32, TF_Uint8]>:$tensor,
    TF_Uint8Tensor:$bad_color,

    // Attribute: must be >= 1, defaults to 3.
    Confined<DefaultValuedAttr<I64Attr, "3">, [IntMinValue<1>]>:$max_images
  );

  let results = (outs);

  // Element type "T" is derived from the $tensor operand (operand index 3).
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
}
1897
// Writes pre-serialized Summary protos through a writer resource.
def TF_WriteRawProtoSummaryOp : TF_Op<"WriteRawProtoSummary", []> {
  let summary = "Writes a `Summary` protocol buffer with serialized string `Summary` protocol buffers.";

  let description = [{
writer: A handle to a summary writer.
step: The step to write the summary for.
tensor: A tensor holding one or more serialized `Summary` protobufs to write.
  }];

  let arguments = (ins
    // Writer resource handle (write effect), step counter, and the
    // serialized Summary proto string(s).
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer,
    TF_Int64Tensor:$step,
    TF_StrTensor:$tensor
  );

  let results = (outs);
}
1915
// Writes a scalar summary through a writer resource. The description text
// below is emitted into generated op documentation; fixed the broken
// sentence "must have the scalars" -> "must be scalars".
def TF_WriteScalarSummaryOp : TF_Op<"WriteScalarSummary", []> {
  let summary = "Writes a `Summary` protocol buffer with scalar values.";

  let description = [{
The input `tag` and `value` must be scalars.

writer: A handle to a summary writer.
step: The step to write the summary for.
tag: Tag for the summary.
value: Value for the summary.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer,
    TF_Int64Tensor:$step,
    TF_StrTensor:$tag,
    // Scalar payload; restricted to integer or floating-point tensors.
    TF_IntOrFpTensor:$value
  );

  let results = (outs);

  // Element type "T" is derived from the $value operand (operand index 3).
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<3>;
}
1939
// Writes a tensor summary (with SummaryMetadata) through a writer resource.
def TF_WriteSummaryOp : TF_Op<"WriteSummary", []> {
  let summary = "Outputs a `Summary` protocol buffer with a tensor.";

  let description = [{
writer: A handle to a summary writer.
step: The step to write the summary for.
tensor: A tensor to serialize.
tag: The summary's tag.
summary_metadata: Serialized SummaryMetadata protocol buffer containing
 plugin-related metadata for this summary.
  }];

  let arguments = (ins
    Arg<TF_ResourceTensor, "", [TF_SummaryWrite]>:$writer,
    TF_Int64Tensor:$step,
    // Payload accepts any TF tensor type (TF_Tensor), unlike the more
    // constrained Write*Summary ops above.
    TF_Tensor:$tensor,
    TF_StrTensor:$tag,
    TF_StrTensor:$summary_metadata
  );

  let results = (outs);

  // Element type "T" is derived from the $tensor operand (operand index 2).
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<2>;
}
1964
// Initializes a lookup table from the contents of a dataset variant.
// The op has no registered summary/description text upstream, hence the
// empty summary string.
def TF_InitializeTableFromDatasetOp : TF_Op<"InitializeTableFromDataset", []> {
  let summary = "";

  let arguments = (ins
    // Lookup table resource handle, tagged with a table-write effect.
    Arg<TF_ResourceTensor, "", [TF_LookupTableWrite]>:$table_handle,
    // Dataset supplying the key/value pairs, as a variant tensor.
    TF_VariantTensor:$dataset
  );

  let results = (outs);
}
1975
// TODO(b/168035831): Model filename read.
// Initializes a lookup table from a text file. The description text below is
// emitted into generated op documentation; fixed subject-verb agreement
// ("is extracted" -> "are extracted") and a missing space before
// "(starting from zero)".
def TF_InitializeTableFromTextFileV2Op : TF_Op<"InitializeTableFromTextFileV2", []> {
  let summary = "Initializes a table from a text file.";

  let description = [{
It inserts one key-value pair into the table for each line of the file.
The key and value are extracted from the whole line content, elements from the
split line based on `delimiter` or the line number (starting from zero).
Where to extract the key and value from a line is specified by `key_index` and
`value_index`.

- A value of -1 means use the line number (starting from zero), expects `int64`.
- A value of -2 means use the whole line content, expects `string`.
- A value >= 0 means use the index (starting at zero) of the split line based
  on `delimiter`.
  }];

  let arguments = (ins
    // Operands: the table resource (write effect) and the vocabulary file
    // path. Note the filename read itself is not yet modeled (see TODO).
    Arg<TF_ResourceTensor, "", [TF_LookupTableWrite]>:$table_handle,
    TF_StrTensor:$filename,

    // Attributes: index semantics are described above (-1 = line number,
    // -2 = whole line, >= 0 = split-field index). vocab_size of -1 means
    // "unbounded"; delimiter defaults to a tab character.
    Confined<I64Attr, [IntMinValue<-2>]>:$key_index,
    Confined<I64Attr, [IntMinValue<-2>]>:$value_index,
    Confined<DefaultValuedAttr<I64Attr, "-1">, [IntMinValue<-1>]>:$vocab_size,
    DefaultValuedAttr<StrAttr, "\t">:$delimiter
  );

  let results = (outs);
}
2005
// TODO(b/168035831): Model filename read.
// Caches a dataset, either in memory (via the $cache resource) or on disk
// (via $filename); produces the cached dataset as a new variant handle.
def TF_CacheDatasetV2Op : TF_Op<"CacheDatasetV2", []> {
  let summary = "";

  let arguments = (ins
    // Operands: input dataset, cache file path, and the in-memory cache
    // resource (modeled with both read and write effects).
    TF_VariantTensor:$input_dataset,
    TF_StrTensor:$filename,
    Arg<TF_ResourceTensor, "", [TF_DatasetMemoryCacheRead, TF_DatasetMemoryCacheWrite]>:$cache,

    // Attributes: element types/shapes of the output dataset; both must be
    // non-empty arrays.
    Confined<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types,
    Confined<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes
  );

  let results = (outs
    TF_VariantTensor:$handle
  );
}
2023
// Internal placeholder op (leading underscore in the op name) that stands in
// for a replicated op's device ordinal until later rewrite passes replace it
// with a concrete constant.
def TF__TPUDeviceOrdinalPlaceholderOp : TF_Op<"_TPUDeviceOrdinalPlaceholder", []> {
  let summary = [{
Placeholder device ordinal that represents device ordinal of a replicated op.
  }];

  let description = [{
This op can be used when certain rewrite passes materialize ops that require a
device ordinal of a replicated op but replication logic has been abstracted away
using tf_device.replicate op. Subsequent rewrite passes must replace this op with
a constant output that represents the correct device ordinal of the replicated
operations inside a TPU host.
  }];

  // Takes nothing; produces only the placeholder ordinal.
  let arguments = (ins);

  let results = (outs
    TF_Int64Tensor:$device_ordinal
  );
}
2043
// Legacy (reference-variable) assignment op. Note the empty trait list: it is
// side-effecting (not NoSideEffect), but its effect is not modeled with a
// resource Arg like the V2 resource-variable ops in this file.
def TF_AssignOp : TF_Op<"Assign", []> {
  let summary = "Update 'ref' by assigning 'value' to it.";

  let description = [{
This operation outputs "ref" after the assignment is done.
This makes it easier to chain operations that need to use the reset value.

This is a side-effecting operation because it will change the value of its
argument "ref" in addition to returning the results.
  }];

  let arguments = (ins
    // Operands: the reference tensor being assigned and the new value.
    TF_Tensor:$ref,
    TF_Tensor:$value,

    // Attributes (defaults mirror the TensorFlow op registration).
    DefaultValuedAttr<BoolAttr, "true">:$validate_shape,
    DefaultValuedAttr<BoolAttr, "true">:$use_locking
  );

  let results = (outs
    // The updated reference, returned to allow chaining.
    TF_Tensor:$output_ref
  );

  // Element type "T" is derived from the $ref operand (operand index 0).
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
}
2069
// Groups a variadic list of partitioned inputs into one value for a
// TPU computation.
// NOTE(review): the summary text ends mid-sentence ("This op") — it appears
// truncated; confirm against the TensorFlow op registration before extending.
def TF_TPUPartitionedInputOp : TF_Op<"TPUPartitionedInput", [NoSideEffect]> {
  let summary = [{
An op that groups a list of partitioned inputs together. This op
  }];

  let arguments = (ins
    // The partitioned inputs to be grouped.
    Variadic<TF_Tensor>:$inputs,

    // Attributes: partition dimension (default 0) and an optional XLA
    // sharding annotation.
    DefaultValuedAttr<I64Attr, "0">:$partition_dim,
    OptionalAttr<StrAttr>:$_XlaSharding
  );

  let results = (outs
    TF_Tensor:$output
  );

  // "T" is the element type of the variadic operand; "N" is its arity.
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedOperandSizeAttr N = TF_DerivedOperandSizeAttr<0>;
}
2089
// Demultiplexes one XLA-sharded tensor into a variadic list of partitioned
// outputs (the summary's sentence continues in the description block).
def TF_TPUPartitionedOutputOp : TF_Op<"TPUPartitionedOutput", [NoSideEffect]> {
  let summary = [{
An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned
  }];

  let description = [{
outputs outside the XLA computation.
  }];

  let arguments = (ins
    // The single sharded input tensor.
    TF_Tensor:$inputs,

    // Attributes: partition dimension (default 0) and an optional XLA
    // sharding annotation.
    DefaultValuedAttr<I64Attr, "0">:$partition_dim,
    OptionalAttr<StrAttr>:$_XlaSharding
  );

  let results = (outs
    // One output per split.
    Variadic<TF_Tensor>:$output
  );

  // "T" is the input element type; "num_splits" is the variadic result arity.
  TF_DerivedOperandTypeAttr T = TF_DerivedOperandTypeAttr<0>;
  TF_DerivedResultSizeAttr num_splits = TF_DerivedResultSizeAttr<0>;
}
2113
2114#endif // TF_OPS
2115